ARTS 2.5.0 (git: 9ee3ac6c)
m_optproperties.cc
1/* Copyright (C) 2002-2012
2 Sreerekha T.R. <rekha@uni-bremen.de>
3 Claudia Emde <claudia.emde@dlr.de>
4 Cory Davies <cory@met.ed.ac.uk>
5
6 This program is free software; you can redistribute it and/or
7 modify it under the terms of the GNU General Public License as
8 published by the Free Software Foundation; either version 2 of the
9 License, or (at your option) any later version.
10
11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
19 USA. */
20
35/*===========================================================================
36 === External declarations
37 ===========================================================================*/
38
39#include <cfloat>
40#include <cmath>
41#include "array.h"
42#include "arts.h"
43#include "auto_md.h"
44#include "check_input.h"
45#include "exceptions.h"
46#include "interpolation.h"
48#include "logic.h"
49#include "math_funcs.h"
50#include "matpackIII.h"
51#include "matpackVII.h"
52#include "messages.h"
53#include "montecarlo.h"
54#include "optproperties.h"
55#include "sorting.h"
56#include "xml_io.h"
57
58extern const Numeric PI;
59extern const Numeric DEG2RAD;
60extern const Numeric RAD2DEG;
61
62#define PART_TYPE scat_data[i_ss][i_se].ptype
63#define F_DATAGRID scat_data[i_ss][i_se].f_grid
64#define T_DATAGRID scat_data[i_ss][i_se].T_grid
65#define ZA_DATAGRID scat_data[i_ss][i_se].za_grid
66#define AA_DATAGRID scat_data[i_ss][i_se].aa_grid
67#define PHA_MAT_DATA scat_data[i_ss][i_se].pha_mat_data
68#define EXT_MAT_DATA scat_data[i_ss][i_se].ext_mat_data
69#define ABS_VEC_DATA scat_data[i_ss][i_se].abs_vec_data
70
71// If particle number density is below this value,
72// no transformations will be performed
73#define PND_LIMIT 1e-12
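// Note on the shorthands above: inside the loops below, i_ss indexes the
// scattering species and i_se the scattering element within that species,
// so e.g. PHA_MAT_DATA refers to the phase matrix data of the element
// currently being processed. PND_LIMIT is compared against pnd_field;
// elements with (practically) zero particle number density are skipped,
// which avoids needless frequency/temperature interpolation and frame
// transformations.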
74
75/* Workspace method: Doxygen documentation will be auto-generated */
76void pha_mat_sptFromData( // Output:
77 Tensor5& pha_mat_spt,
78 // Input:
79 const ArrayOfArrayOfSingleScatteringData& scat_data,
80 const Vector& za_grid,
81 const Vector& aa_grid,
82 const Index& za_index, // propagation directions
83 const Index& aa_index,
84 const Index& f_index,
85 const Vector& f_grid,
86 const Numeric& rtp_temperature,
87 const Tensor4& pnd_field,
88 const Index& scat_p_index,
89 const Index& scat_lat_index,
90 const Index& scat_lon_index,
91 const Verbosity& verbosity) {
92 const Index stokes_dim = pha_mat_spt.ncols();
93 if (stokes_dim > 4 || stokes_dim < 1) {
94 throw runtime_error(
95 "The dimension of the stokes vector \n"
96 "must be 1,2,3 or 4");
97 }
98
99 // Determine total number of scattering elements
100 const Index N_se_total = TotalNumberOfElements(scat_data);
101 if (N_se_total != pnd_field.nbooks()) {
102 ostringstream os;
103 os << "Total number of scattering elements in scat_data "
104 << "inconsistent with size of pnd_field.";
105 throw runtime_error(os.str());
106 }
107 // as pha_mat_spt is typically initialized from pnd_field, this theoretically
108 // checks the same as the runtime_error above. Still, we keep it to be on the
109 // safe side.
110 ARTS_ASSERT(pha_mat_spt.nshelves() == N_se_total);
111
112 // Check that we don't have scat_data_mono here. Only checking the first
113 // scat element, assuming the other elements have been processed in the same
114 // manner. That's safe against having mono data, but not against having
115 // individual elements produced with only a single frequency. This, however,
116 // has been checked by the scat_data_raw reading routines (ScatSpecies/Element*Add/Read).
117 // It remains unsafe, however, when ReadXML is used directly or when scat_data(_raw) is
118 // (partly) produced from scat_data_singleTmatrix.
119 if (scat_data[0][0].f_grid.nelem() < 2) {
120 ostringstream os;
121 os << "Scattering data seems to be *scat_data_mono* (1 freq point only),\n"
122 << "but frequency interpolable data (*scat_data* with >=2 freq points) "
123 << "is expected here.";
124 throw runtime_error(os.str());
125 }
126
127 const Index N_ss = scat_data.nelem();
128
129 // Phase matrix from the database, interpolated to a single frequency (and,
130 // if needed, temperature). Dimensions: [za_sca, aa_sca, za_inc, aa_inc, element]
131 Tensor5 pha_mat_data_int;
132
133 Index i_se_flat = 0;
134 // Loop over scattering species
135 for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
136 const Index N_se = scat_data[i_ss].nelem();
137
138 // Loop over the included scattering elements
139 for (Index i_se = 0; i_se < N_se; i_se++) {
140 // If the particle number density at a specific point in the
141 // atmosphere for the i_se scattering element is zero, we don't need
142 // to do the transformation!
143 if (pnd_field(i_se_flat, scat_p_index, scat_lat_index, scat_lon_index) >
144 PND_LIMIT) {
145 // First we have to transform the data from the coordinate system
146 // used in the database (depending on the kind of ptype) to the
147 // laboratory coordinate system.
148
149 // Frequency and temperature interpolation:
150
151 // Container for data at one frequency and one temperature.
152 pha_mat_data_int.resize(PHA_MAT_DATA.nshelves(),
153 PHA_MAT_DATA.nbooks(),
154 PHA_MAT_DATA.npages(),
155 PHA_MAT_DATA.nrows(),
156 PHA_MAT_DATA.ncols());
157
158 // Gridpositions:
159 GridPos freq_gp;
160 gridpos(freq_gp, F_DATAGRID, f_grid[f_index]);
161 GridPos t_gp;
162 Vector itw;
163
164 Index ti = -1;
165
166 if (PHA_MAT_DATA.nvitrines() == 1) // just 1 T_grid element
167 {
168 ti = 0;
169 } else if (rtp_temperature < 0.) // coding for 'not interpolate, but
170 // pick one temperature'
171 {
172 if (rtp_temperature > -10.) // lowest T-point
173 {
174 ti = 0;
175 } else if (rtp_temperature > -20.) // highest T-point
176 {
177 ti = T_DATAGRID.nelem() - 1;
178 } else // median T-point
179 {
180 ti = T_DATAGRID.nelem() / 2;
181 }
182 }
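// Reading of the branches above: a negative rtp_temperature switches off
// temperature interpolation and instead selects a single entry of T_DATAGRID:
// values in (-10,0) pick the first grid point, values in (-20,-10] the last
// one, and anything at or below -20 the middle one. ti stays at -1 only if a
// genuine temperature interpolation is required (handled just below).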
183
184 if (ti < 0) // temperature interpolation
185 {
186 ostringstream os;
187 os << "In pha_mat_sptFromData.\n"
188 << "The temperature grid of the scattering data does not\n"
189 << "cover the atmospheric temperature at cloud location.\n"
190 << "The data should include the value T = " << rtp_temperature
191 << " K.";
192 chk_interpolation_grids(os.str(), T_DATAGRID, rtp_temperature);
193
194 gridpos(t_gp, T_DATAGRID, rtp_temperature);
195
196 // Interpolation weights:
197 itw.resize(4);
198 interpweights(itw, freq_gp, t_gp);
199
200 for (Index i_za_sca = 0; i_za_sca < PHA_MAT_DATA.nshelves();
201 i_za_sca++)
202 for (Index i_aa_sca = 0; i_aa_sca < PHA_MAT_DATA.nbooks();
203 i_aa_sca++)
204 for (Index i_za_inc = 0; i_za_inc < PHA_MAT_DATA.npages();
205 i_za_inc++)
206 for (Index i_aa_inc = 0; i_aa_inc < PHA_MAT_DATA.nrows();
207 i_aa_inc++)
208 for (Index i = 0; i < PHA_MAT_DATA.ncols(); i++)
209 // Interpolation of phase matrix:
210 pha_mat_data_int(
211 i_za_sca, i_aa_sca, i_za_inc, i_aa_inc, i) =
212 interp(itw,
213 PHA_MAT_DATA(joker,
214 joker,
215 i_za_sca,
216 i_aa_sca,
217 i_za_inc,
218 i_aa_inc,
219 i),
220 freq_gp,
221 t_gp);
222 } else {
223 // Interpolation weights:
224 itw.resize(2);
225 interpweights(itw, freq_gp);
226 for (Index i_za_sca = 0; i_za_sca < PHA_MAT_DATA.nshelves();
227 i_za_sca++)
228 for (Index i_aa_sca = 0; i_aa_sca < PHA_MAT_DATA.nbooks();
229 i_aa_sca++)
230 for (Index i_za_inc = 0; i_za_inc < PHA_MAT_DATA.npages();
231 i_za_inc++)
232 for (Index i_aa_inc = 0; i_aa_inc < PHA_MAT_DATA.nrows();
233 i_aa_inc++)
234 for (Index i = 0; i < PHA_MAT_DATA.ncols(); i++)
235 // Interpolation of phase matrix:
236 pha_mat_data_int(
237 i_za_sca, i_aa_sca, i_za_inc, i_aa_inc, i) =
238 interp(itw,
239 PHA_MAT_DATA(joker,
240 ti,
241 i_za_sca,
242 i_aa_sca,
243 i_za_inc,
244 i_aa_inc,
245 i),
246 freq_gp);
247 }
248
249 // Do the transformation into the laboratory coordinate system.
250 for (Index za_inc_idx = 0; za_inc_idx < za_grid.nelem();
251 za_inc_idx++) {
252 for (Index aa_inc_idx = 0; aa_inc_idx < aa_grid.nelem();
253 aa_inc_idx++) {
254 pha_matTransform(
255 pha_mat_spt(i_se_flat, za_inc_idx, aa_inc_idx, joker, joker),
256 pha_mat_data_int,
257 ZA_DATAGRID,
258 AA_DATAGRID,
259 PART_TYPE,
260 za_index,
261 aa_index,
262 za_inc_idx,
263 aa_inc_idx,
264 za_grid,
265 aa_grid,
266 verbosity);
267 }
268 }
269 }
270 i_se_flat++;
271 }
272 }
273}
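// In short, for the propagation direction (za_grid[za_index], aa_grid[aa_index])
// the method above fills, per scattering element i_se_flat,
//   pha_mat_spt(i_se_flat, za_inc_idx, aa_inc_idx, i, j)
// in the laboratory frame. A typical follow-up (sketch) is
//   pha_matCalc(pha_mat, pha_mat_spt, pnd_field, atmosphere_dim,
//               scat_p_index, scat_lat_index, scat_lon_index, verbosity);
// which weights these per-element matrices with the particle number densities
// (see pha_matCalc further below in this file).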
274
275/* Workspace method: Doxygen documentation will be auto-generated */
277 Tensor5& pha_mat_spt,
278 // Input:
279 const ArrayOfTensor7& pha_mat_sptDOITOpt,
280 const ArrayOfArrayOfSingleScatteringData& scat_data_mono,
281 const Index& doit_za_grid_size,
282 const Vector& aa_grid,
283 const Index& za_index, // propagation directions
284 const Index& aa_index,
285 const Numeric& rtp_temperature,
286 const Tensor4& pnd_field,
287 const Index& scat_p_index,
288 const Index& scat_lat_index,
289 const Index& scat_lon_index,
290 const Verbosity&) {
291 const Index N_se_total = TotalNumberOfElements(scat_data_mono);
292
293 if (N_se_total != pnd_field.nbooks()) {
294 ostringstream os;
295 os << "Total number of scattering elements in scat_data_mono "
296 << "inconsistent with size of pnd_field.";
297 throw runtime_error(os.str());
298 }
299 // as pha_mat_spt is typically initialized from pnd_field, this theoretically
300 // checks the same as the runtime_error above. Still, we keep it to be on the
301 // safe side.
302 ARTS_ASSERT(pha_mat_spt.nshelves() == N_se_total);
303
304 // atmosphere_dim = 3
305 if (pnd_field.ncols() > 1) {
306 ARTS_ASSERT(pha_mat_sptDOITOpt.nelem() == N_se_total);
307 // Assuming that if the size is o.k. for one scattering element, it will
308 // also be o.k. for the other scattering elements.
309 ARTS_ASSERT(pha_mat_sptDOITOpt[0].nlibraries() ==
310 scat_data_mono[0][0].T_grid.nelem());
311 ARTS_ASSERT(pha_mat_sptDOITOpt[0].nvitrines() == doit_za_grid_size);
312 ARTS_ASSERT(pha_mat_sptDOITOpt[0].nshelves() == aa_grid.nelem());
313 ARTS_ASSERT(pha_mat_sptDOITOpt[0].nbooks() == doit_za_grid_size);
314 ARTS_ASSERT(pha_mat_sptDOITOpt[0].npages() == aa_grid.nelem());
315 }
316
317 // atmosphere_dim = 1, only zenith angle required for scattered directions.
318 else if (pnd_field.ncols() == 1) {
319 //ARTS_ASSERT(is_size(scat_theta, doit_za_grid_size, 1,
320 // doit_za_grid_size, aa_grid.nelem()));
321
322 ARTS_ASSERT(pha_mat_sptDOITOpt.nelem() == TotalNumberOfElements(scat_data_mono));
323 // Assuming that if the size is o.k. for one scattering element, it will
324 // also be o.k. for the other scattering elements.
325 ARTS_ASSERT(pha_mat_sptDOITOpt[0].nlibraries() ==
326 scat_data_mono[0][0].T_grid.nelem());
327 ARTS_ASSERT(pha_mat_sptDOITOpt[0].nvitrines() == doit_za_grid_size);
328 ARTS_ASSERT(pha_mat_sptDOITOpt[0].nshelves() == 1);
329 ARTS_ASSERT(pha_mat_sptDOITOpt[0].nbooks() == doit_za_grid_size);
330 ARTS_ASSERT(pha_mat_sptDOITOpt[0].npages() == aa_grid.nelem());
331 }
332
333 ARTS_ASSERT(doit_za_grid_size > 0);
334
335 // Check that we do indeed have scat_data_mono here. Only checking the first
336 // scat element, assuming the other elements have been processed in the same
337 // manner. That's safe against having scat_data here if it originated from the
338 // scat_data_raw reading routines (ScatSpecies/Element*Add/Read); it's not safe
339 // against data read by ReadXML directly or scat_data(_raw) that has been (partly)
340 // produced from scat_data_singleTmatrix. Checking that would be too costly here,
341 // though.
342 // Also, we can't check here whether data is at the correct frequency since we
343 // don't know f_grid and f_index here (we could pass it in, though).
344 if (scat_data_mono[0][0].f_grid.nelem() > 1) {
345 ostringstream os;
346 os << "Scattering data seems to be *scat_data* (several freq points),\n"
347 << "but *scat_data_mono* (1 freq point only) is expected here.";
348 throw runtime_error(os.str());
349 }
350
351 // Create equidistant zenith angle grid
352 Vector za_grid;
353 nlinspace(za_grid, 0, 180, doit_za_grid_size);
354
355 const Index N_ss = scat_data_mono.nelem();
356 const Index stokes_dim = pha_mat_spt.ncols();
357
358 if (stokes_dim > 4 || stokes_dim < 1) {
359 throw runtime_error(
360 "The dimension of the stokes vector \n"
361 "must be 1,2,3 or 4");
362 }
363
364 GridPos T_gp;
365 Vector itw(2);
366
367 // Initialisation
368 pha_mat_spt = 0.;
369
370 Index i_se_flat = 0;
371
372 // Do the transformation into the laboratory coordinate system.
373 for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
374 const Index N_se = scat_data_mono[i_ss].nelem();
375
376 for (Index i_se = 0; i_se < N_se; i_se++) {
377 // If the particle number density at a specific point in the
378 // atmosphere for the i_se scattering element is zero, we don't need
379 // to do the transformation!
380 if (pnd_field(i_se_flat, scat_p_index, scat_lat_index, scat_lon_index) >
381 PND_LIMIT) //TRS
382 {
383 Index nT = scat_data_mono[i_ss][i_se].pha_mat_data.nvitrines();
384 Index ti = -1;
385
386 if (nT == 1) // just 1 T_grid element
387 {
388 ti = 0;
389 } else if (rtp_temperature < 0.) // coding for 'not interpolate, but
390 // pick one temperature'
391 {
392 if (rtp_temperature > -10.) // lowest T-point
393 {
394 ti = 0;
395 } else if (rtp_temperature > -20.) // highest T-point
396 {
397 ti = nT - 1;
398 } else // median T-point
399 {
400 ti = nT / 2;
401 }
402 } else {
403 ostringstream os;
404 os << "In pha_mat_sptFromDataDOITOpt.\n"
405 << "The temperature grid of the scattering data does not\n"
406 << "cover the atmospheric temperature at cloud location.\n"
407 << "The data should include the value T = " << rtp_temperature
408 << " K.";
409 chk_interpolation_grids(
410 os.str(), scat_data_mono[i_ss][i_se].T_grid, rtp_temperature);
411
412 // Gridpositions:
413 gridpos(T_gp, scat_data_mono[i_ss][i_se].T_grid, rtp_temperature);
414 // Interpolation weights:
415 interpweights(itw, T_gp);
416 }
417
418 for (Index za_inc_idx = 0; za_inc_idx < doit_za_grid_size;
419 za_inc_idx++) {
420 for (Index aa_inc_idx = 0; aa_inc_idx < aa_grid.nelem();
421 aa_inc_idx++) {
422 if (ti < 0) // Temperature interpolation
423 {
424 for (Index i = 0; i < stokes_dim; i++) {
425 for (Index j = 0; j < stokes_dim; j++) {
426 pha_mat_spt(i_se_flat, za_inc_idx, aa_inc_idx, i, j) =
427 interp(itw,
428 pha_mat_sptDOITOpt[i_se_flat](joker,
429 za_index,
430 aa_index,
431 za_inc_idx,
432 aa_inc_idx,
433 i,
434 j),
435 T_gp);
436 }
437 }
438 } else {
439 pha_mat_spt(i_se_flat, za_inc_idx, aa_inc_idx, joker, joker) =
440 pha_mat_sptDOITOpt[i_se_flat](ti,
441 za_index,
442 aa_index,
443 za_inc_idx,
444 aa_inc_idx,
445 joker,
446 joker);
447 }
448 }
449 }
450 } // TRS
451
452 i_se_flat++;
453 }
454 }
455}
456
457/* Workspace method: Doxygen documentation will be auto-generated */
458void opt_prop_sptFromData( // Output and Input:
459 ArrayOfPropagationMatrix& ext_mat_spt,
460 ArrayOfStokesVector& abs_vec_spt,
461 // Input:
462 const ArrayOfArrayOfSingleScatteringData& scat_data,
463 const Vector& za_grid,
464 const Vector& aa_grid,
465 const Index& za_index, // propagation directions
466 const Index& aa_index,
467 const Index& f_index,
468 const Vector& f_grid,
469 const Numeric& rtp_temperature,
470 const Tensor4& pnd_field,
471 const Index& scat_p_index,
472 const Index& scat_lat_index,
473 const Index& scat_lon_index,
474 const Verbosity& verbosity) {
475 const Index N_ss = scat_data.nelem();
476 const Numeric za_sca = za_grid[za_index];
477 const Numeric aa_sca = aa_grid[aa_index];
478
479 DEBUG_ONLY(const Index N_se_total = TotalNumberOfElements(scat_data);
480 if (N_ss) {
481 ARTS_ASSERT(ext_mat_spt[0].NumberOfFrequencies() == N_se_total);
482 ARTS_ASSERT(abs_vec_spt[0].NumberOfFrequencies() == N_se_total);
483 });
484
485 // Check that we don't have scat_data_mono here. Only checking the first
486 // scat element, assuming the other elements have been processed in the same
487 // manner. That's safe against having mono data, but not against having
488 // individual elements produced with only a single frequency. This, however,
489 // has been checked by the scat_data_raw reading routines (ScatSpecies/Element*Add/Read).
490 // It remains unsafe, however, when ReadXML is used directly or when scat_data(_raw) is
491 // (partly) produced from scat_data_singleTmatrix.
492 if (scat_data[0][0].f_grid.nelem() < 2) {
493 ostringstream os;
494 os << "Scattering data seems to be *scat_data_mono* (1 freq point only),\n"
495 << "but frequency interpolable data (*scat_data* with >=2 freq points) "
496 << "is expected here.";
497 throw runtime_error(os.str());
498 }
499
500 // Extinction matrix and absorption vector from the database, interpolated
501 // to a single frequency (and, if needed, temperature). Dimensions: [za, aa, element]
502 Tensor3 ext_mat_data_int;
503 Tensor3 abs_vec_data_int;
504
505 // Initialisation
506 ext_mat_spt = 0.;
507 abs_vec_spt = 0.;
508
509 Index i_se_flat = 0;
510 // Loop over the included scattering species
511 for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
512 const Index N_se = scat_data[i_ss].nelem();
513
514 // Loop over the included scattering elements
515 for (Index i_se = 0; i_se < N_se; i_se++) {
516 // If the particle number density at a specific point in the
517 // atmosphere for the i_se scattering element is zero, we don't need
518 // to do the transformation
519
520 if (pnd_field(i_se_flat, scat_p_index, scat_lat_index, scat_lon_index) >
521 PND_LIMIT) {
522 // First we have to transform the data from the coordinate system
523 // used in the database (depending on the kind of ptype) to the
524 // laboratory coordinate system.
525
526 // Frequency interpolation:
527
528 // The data is interpolated on one frequency.
529 //
530 // Resize the variables for the interpolated data:
531 //
532 ext_mat_data_int.resize(
533 EXT_MAT_DATA.npages(), EXT_MAT_DATA.nrows(), EXT_MAT_DATA.ncols());
534 //
535 abs_vec_data_int.resize(
536 ABS_VEC_DATA.npages(), ABS_VEC_DATA.nrows(), ABS_VEC_DATA.ncols());
537
538 // Gridpositions:
539 GridPos freq_gp;
540 gridpos(freq_gp, F_DATAGRID, f_grid[f_index]);
541 GridPos t_gp;
542 Vector itw;
543
544 if (T_DATAGRID.nelem() > 1) {
545 ostringstream os;
546 os << "In opt_prop_sptFromData.\n"
547 << "The temperature grid of the scattering data does not\n"
548 << "cover the atmospheric temperature at cloud location.\n"
549 << "The data should include the value T = " << rtp_temperature
550 << " K.";
551 chk_interpolation_grids(os.str(), T_DATAGRID, rtp_temperature);
552
553 gridpos(t_gp, T_DATAGRID, rtp_temperature);
554
555 // Interpolation weights:
556 itw.resize(4);
557 interpweights(itw, freq_gp, t_gp);
558
559 for (Index i_za_sca = 0; i_za_sca < EXT_MAT_DATA.npages();
560 i_za_sca++) {
561 for (Index i_aa_sca = 0; i_aa_sca < EXT_MAT_DATA.nrows();
562 i_aa_sca++) {
563 //
564 // Interpolation of extinction matrix:
565 //
566 for (Index i = 0; i < EXT_MAT_DATA.ncols(); i++) {
567 ext_mat_data_int(i_za_sca, i_aa_sca, i) =
568 interp(itw,
569 EXT_MAT_DATA(joker, joker, i_za_sca, i_aa_sca, i),
570 freq_gp,
571 t_gp);
572 }
573 }
574 }
575
576 for (Index i_za_sca = 0; i_za_sca < ABS_VEC_DATA.npages();
577 i_za_sca++) {
578 for (Index i_aa_sca = 0; i_aa_sca < ABS_VEC_DATA.nrows();
579 i_aa_sca++) {
580 //
581 // Interpolation of absorption vector:
582 //
583 for (Index i = 0; i < ABS_VEC_DATA.ncols(); i++) {
584 abs_vec_data_int(i_za_sca, i_aa_sca, i) =
585 interp(itw,
586 ABS_VEC_DATA(joker, joker, i_za_sca, i_aa_sca, i),
587 freq_gp,
588 t_gp);
589 }
590 }
591 }
592 } else {
593 // Interpolation weights:
594 itw.resize(2);
595 interpweights(itw, freq_gp);
596
597 for (Index i_za_sca = 0; i_za_sca < EXT_MAT_DATA.npages();
598 i_za_sca++) {
599 for (Index i_aa_sca = 0; i_aa_sca < EXT_MAT_DATA.nrows();
600 i_aa_sca++) {
601 //
602 // Interpolation of extinction matrix:
603 //
604 for (Index i = 0; i < EXT_MAT_DATA.ncols(); i++) {
605 ext_mat_data_int(i_za_sca, i_aa_sca, i) =
606 interp(itw,
607 EXT_MAT_DATA(joker, 0, i_za_sca, i_aa_sca, i),
608 freq_gp);
609 }
610 }
611 }
612
613 for (Index i_za_sca = 0; i_za_sca < ABS_VEC_DATA.npages();
614 i_za_sca++) {
615 for (Index i_aa_sca = 0; i_aa_sca < ABS_VEC_DATA.nrows();
616 i_aa_sca++) {
617 //
618 // Interpolation of absorption vector:
619 //
620 for (Index i = 0; i < ABS_VEC_DATA.ncols(); i++) {
621 abs_vec_data_int(i_za_sca, i_aa_sca, i) =
622 interp(itw,
623 ABS_VEC_DATA(joker, 0, i_za_sca, i_aa_sca, i),
624 freq_gp);
625 }
626 }
627 }
628 }
629
630 //
631 // Do the transformation into the laboratory coordinate system.
632 //
633 // Extinction matrix:
634 //
635 ext_matTransform(ext_mat_spt[i_se_flat],
636 ext_mat_data_int,
637 ZA_DATAGRID,
638 AA_DATAGRID,
639 PART_TYPE,
640 za_sca,
641 aa_sca,
642 verbosity);
643 //
644 // Absorption vector:
645 //
646 abs_vecTransform(abs_vec_spt[i_se_flat],
647 abs_vec_data_int,
648 ZA_DATAGRID,
649 AA_DATAGRID,
650 PART_TYPE,
651 za_sca,
652 aa_sca,
653 verbosity);
654 }
655
656 i_se_flat++;
657 }
658 }
659}
660
661/* Workspace method: Doxygen documentation will be auto-generated */
662void opt_prop_sptFromScat_data( // Output and Input:
663 ArrayOfPropagationMatrix& ext_mat_spt,
664 ArrayOfStokesVector& abs_vec_spt,
665 // Input:
666 const ArrayOfArrayOfSingleScatteringData& scat_data,
667 const Index& scat_data_checked,
668 const Vector& za_grid,
669 const Vector& aa_grid,
670 const Index& za_index, // propagation directions
671 const Index& aa_index,
672 const Index& f_index,
673 const Numeric& rtp_temperature,
674 const Tensor4& pnd_field,
675 const Index& scat_p_index,
676 const Index& scat_lat_index,
677 const Index& scat_lon_index,
678 const Verbosity& verbosity) {
679 if (scat_data_checked != 1)
680 throw runtime_error(
681 "The scattering data must be flagged to have "
682 "passed a consistency check (scat_data_checked=1).");
683
684 const Index N_ss = scat_data.nelem();
685 const Index stokes_dim = ext_mat_spt[0].StokesDimensions();
686 const Numeric za_sca = za_grid[za_index];
687 const Numeric aa_sca = aa_grid[aa_index];
688
689 if (stokes_dim > 4 || stokes_dim < 1) {
690 throw runtime_error(
691 "The dimension of the stokes vector \n"
692 "must be 1,2,3 or 4");
693 }
694
695 DEBUG_ONLY(const Index N_se_total = TotalNumberOfElements(scat_data);)
696 ARTS_ASSERT(ext_mat_spt.nelem() == N_se_total);
697 ARTS_ASSERT(abs_vec_spt.nelem() == N_se_total);
698
699 // Extinction matrix and absorption vector from the database, extracted at
700 // a single frequency and temperature. Dimensions: [za, aa, element]
701 Tensor3 ext_mat_data_int;
702 Tensor3 abs_vec_data_int;
703
704 // Initialisation
705 for (auto& pm : ext_mat_spt) pm.SetZero();
706 for (auto& sv : abs_vec_spt) sv.SetZero();
707
708 Index this_f_index;
709
710 Index i_se_flat = 0;
711 // Loop over the included scattering species
712 for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
713 const Index N_se = scat_data[i_ss].nelem();
714
715 // Loop over the included scattering elements
716 for (Index i_se = 0; i_se < N_se; i_se++) {
717 // If the particle number density at a specific point in the
718 // atmosphere for the i_se scattering element is zero, we don't need
719 // to do the transformation
720
721 if (pnd_field(i_se_flat, scat_p_index, scat_lat_index, scat_lon_index) >
722 PND_LIMIT) {
723 // First we have to transform the data from the coordinate system
724 // used in the database (depending on the kind of ptype) to the
725 // laboratory coordinate system.
726
727 // Resize the variables for the interpolated data (1freq, 1T):
728 ext_mat_data_int.resize(
729 EXT_MAT_DATA.npages(), EXT_MAT_DATA.nrows(), EXT_MAT_DATA.ncols());
730 abs_vec_data_int.resize(
731 ABS_VEC_DATA.npages(), ABS_VEC_DATA.nrows(), ABS_VEC_DATA.ncols());
732
733 // Gridpositions and interpolation weights;
734 GridPos t_gp;
735 Vector itw;
736 if (EXT_MAT_DATA.nbooks() > 1 || ABS_VEC_DATA.nbooks() > 1) {
737 ostringstream os;
738 os << "In opt_prop_sptFromScat_data.\n"
739 << "The temperature grid of the scattering data does not\n"
740 << "cover the atmospheric temperature at cloud location.\n"
741 << "The data should include the value T = " << rtp_temperature
742 << " K.";
743 chk_interpolation_grids(os.str(), T_DATAGRID, rtp_temperature);
744
745 gridpos(t_gp, T_DATAGRID, rtp_temperature);
746
747 // Interpolation weights:
748 itw.resize(2);
749 interpweights(itw, t_gp);
750 }
751
752 // Frequency extraction and temperature interpolation
753
754 if (EXT_MAT_DATA.nshelves() == 1)
755 this_f_index = 0;
756 else
757 this_f_index = f_index;
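// (If the data have already been reduced to a single frequency, that one
//  entry is used regardless of f_index; otherwise f_index selects the
//  current frequency from the full grid carried by scat_data.)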
758
759 if (EXT_MAT_DATA.nbooks() > 1) {
760 // Interpolation of extinction matrix:
761 for (Index i_za_sca = 0; i_za_sca < EXT_MAT_DATA.npages();
762 i_za_sca++) {
763 for (Index i_aa_sca = 0; i_aa_sca < EXT_MAT_DATA.nrows();
764 i_aa_sca++) {
765 for (Index i = 0; i < EXT_MAT_DATA.ncols(); i++) {
766 ext_mat_data_int(i_za_sca, i_aa_sca, i) = interp(
767 itw,
768 EXT_MAT_DATA(this_f_index, joker, i_za_sca, i_aa_sca, i),
769 t_gp);
770 }
771 }
772 }
773 } else {
774 ext_mat_data_int = EXT_MAT_DATA(this_f_index, 0, joker, joker, joker);
775 /*
776 for (Index i_za_sca = 0; i_za_sca < EXT_MAT_DATA.npages();
777 i_za_sca++)
778 {
779 for(Index i_aa_sca = 0; i_aa_sca < EXT_MAT_DATA.nrows();
780 i_aa_sca++)
781 {
782 for (Index i = 0; i < EXT_MAT_DATA.ncols(); i++)
783 {
784 ext_mat_data_int(i_za_sca, i_aa_sca, i) =
785 EXT_MAT_DATA(this_f_index, 0,
786 i_za_sca, i_aa_sca, i);
787 }
788 }
789 } */
790 }
791
792 if (ABS_VEC_DATA.nshelves() == 1)
793 this_f_index = 0;
794 else
795 this_f_index = f_index;
796
797 if (ABS_VEC_DATA.nbooks() > 1) {
798 // Interpolation of absorption vector:
799 for (Index i_za_sca = 0; i_za_sca < ABS_VEC_DATA.npages();
800 i_za_sca++) {
801 for (Index i_aa_sca = 0; i_aa_sca < ABS_VEC_DATA.nrows();
802 i_aa_sca++) {
803 for (Index i = 0; i < ABS_VEC_DATA.ncols(); i++) {
804 abs_vec_data_int(i_za_sca, i_aa_sca, i) = interp(
805 itw,
806 ABS_VEC_DATA(this_f_index, joker, i_za_sca, i_aa_sca, i),
807 t_gp);
808 }
809 }
810 }
811 } else {
812 abs_vec_data_int = ABS_VEC_DATA(this_f_index, 0, joker, joker, joker);
813 /*
814 for (Index i_za_sca = 0; i_za_sca < ABS_VEC_DATA.npages();
815 i_za_sca++)
816 {
817 for(Index i_aa_sca = 0; i_aa_sca < ABS_VEC_DATA.nrows();
818 i_aa_sca++)
819 {
820 for (Index i = 0; i < ABS_VEC_DATA.ncols(); i++)
821 {
822 abs_vec_data_int(i_za_sca, i_aa_sca, i) =
823 ABS_VEC_DATA(this_f_index, 0,
824 i_za_sca, i_aa_sca, i);
825 }
826 }
827 } */
828 }
829
830 //
831 // Do the transformation into the laboratory coordinate system.
832 //
833 // Extinction matrix:
834 ext_matTransform(ext_mat_spt[i_se_flat],
835 ext_mat_data_int,
836 ZA_DATAGRID,
837 AA_DATAGRID,
838 PART_TYPE,
839 za_sca,
840 aa_sca,
841 verbosity);
842 // Absorption vector:
843 abs_vecTransform(abs_vec_spt[i_se_flat],
844 abs_vec_data_int,
845 ZA_DATAGRID,
846 AA_DATAGRID,
847 PART_TYPE,
848 za_sca,
849 aa_sca,
850 verbosity);
851 }
852 i_se_flat++;
853 }
854 }
855}
856
857/* Workspace method: Doxygen documentation will be auto-generated */
858void opt_prop_bulkCalc( // Output and Input:
859 PropagationMatrix& ext_mat,
860 StokesVector& abs_vec,
861 // Input:
862 const ArrayOfPropagationMatrix& ext_mat_spt,
863 const ArrayOfStokesVector& abs_vec_spt,
864 const Tensor4& pnd_field,
865 const Index& scat_p_index,
866 const Index& scat_lat_index,
867 const Index& scat_lon_index,
868 const Verbosity&) {
869 Index N_se = abs_vec_spt.nelem();
870 //ARTS_ASSERT( ext_mat_spt.npages()==N_se )
871 if (ext_mat_spt.nelem() not_eq N_se) {
872 ostringstream os;
873 os << "Number of scattering elements in *abs_vec_spt* and *ext_mat_spt*\n"
874 << "does not agree.";
875 throw runtime_error(os.str());
876 }
877
878 Index stokes_dim = abs_vec_spt[0].StokesDimensions();
879 //ARTS_ASSERT( ext_mat_spt.ncols()==stokes_dim && ext_mat_spt.nrows()==stokes_dim )
880 if (ext_mat_spt[0].StokesDimensions() not_eq stokes_dim) {
881 ostringstream os;
882 os << "*stokes_dim* of *abs_vec_spt* and *ext_mat_spt* does not agree.";
883 throw runtime_error(os.str());
884 }
885 if (stokes_dim > 4 || stokes_dim < 1) {
886 ostringstream os;
887 os << "The dimension of stokes vector can only be 1, 2, 3, or 4.";
888 throw runtime_error(os.str());
889 }
890
891 ext_mat = PropagationMatrix(1, stokes_dim);
892 ext_mat.SetZero(); // Initialize to zero!
893 abs_vec = StokesVector(1, stokes_dim);
894 abs_vec.SetZero(); // Initialize to zero!
895
896 PropagationMatrix ext_mat_part(1, stokes_dim);
897 ext_mat_part.SetZero();
898 StokesVector abs_vec_part(1, stokes_dim);
899 abs_vec_part.SetZero();
900
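// In formula form, the loop below assembles the pnd-weighted bulk properties
//   ext_mat(i,j) = sum_l pnd(l) * ext_mat_spt[l](i,j)
//   abs_vec(i)   = sum_l pnd(l) * abs_vec_spt[l](i)
// with pnd(l) = pnd_field(l, scat_p_index, scat_lat_index, scat_lon_index).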
901 // this is the loop over the different scattering elements
902 for (Index l = 0; l < N_se; l++) {
903 abs_vec_part.MultiplyAndAdd(
904 pnd_field(l, scat_p_index, scat_lat_index, scat_lon_index),
905 abs_vec_spt[l]);
906 ext_mat_part.MultiplyAndAdd(
907 pnd_field(l, scat_p_index, scat_lat_index, scat_lon_index),
908 ext_mat_spt[l]);
909 }
910
911 //Add the absorption due to the scattering elements.
912 abs_vec += abs_vec_part;
913 //Add the extinction due to the scattering elements to *ext_mat*.
914 ext_mat += ext_mat_part;
915}
916
917/* Workspace method: Doxygen documentation will be auto-generated */
918void ext_matAddGas(PropagationMatrix& ext_mat,
919 const PropagationMatrix& propmat_clearsky,
920 const Verbosity&) {
921 // Number of Stokes parameters:
922 const Index stokes_dim = ext_mat.StokesDimensions();
923
924 // The second dimension of ext_mat must also match the number of
925 // Stokes parameters:
926 ARTS_USER_ERROR_IF (stokes_dim != propmat_clearsky.StokesDimensions(),
927 "Col dimension of propmat_clearsky "
928 "inconsistent with col dimension in ext_mat.");
929
930 // Number of frequencies:
931 const Index f_dim = ext_mat.NumberOfFrequencies();
932
933 // This must be consistent with the second dimension of
934 // propmat_clearsky. Check this:
935 ARTS_USER_ERROR_IF (f_dim != propmat_clearsky.NumberOfFrequencies(),
936 "Frequency dimension of ext_mat and propmat_clearsky\n"
937 "are inconsistent in ext_matAddGas.");
938
939 ext_mat += propmat_clearsky;
940}
941
942/* Workspace method: Doxygen documentation will be auto-generated */
943void abs_vecAddGas(StokesVector& abs_vec,
944 const PropagationMatrix& propmat_clearsky,
945 const Verbosity&) {
946 // Number of frequencies:
947 const Index f_dim = abs_vec.NumberOfFrequencies();
948 const Index stokes_dim = abs_vec.StokesDimensions();
949
950 // This must be consistent with the second dimension of
951 // propmat_clearsky. Check this:
952 ARTS_USER_ERROR_IF (f_dim != propmat_clearsky.NumberOfFrequencies(),
953 "Frequency dimension of abs_vec and propmat_clearsky\n"
954 "are inconsistent in abs_vecAddGas.");
955 ARTS_USER_ERROR_IF (stokes_dim != propmat_clearsky.StokesDimensions(),
956 "Stokes dimension of abs_vec and propmat_clearsky\n"
957 "are inconsistent in abs_vecAddGas.");
958
959 // Loop all frequencies. Of course this includes the special case
960 // that there is only one frequency.
961 abs_vec += propmat_clearsky; // Defined to only add to the first element.
962
963 // We don't have to do anything about higher elements of abs_vec,
964 // since scalar gas absorption only influences the first element.
965}
966
967/* Workspace method: Doxygen documentation will be auto-generated */
968/*
969void ext_matAddGasZeeman( Tensor3& ext_mat,
970 const Tensor3& ext_mat_zee,
971 const Verbosity&)
972{
973 // Number of Stokes parameters:
974 const Index stokes_dim = ext_mat.ncols();
975
976 // The second dimension of ext_mat must also match the number of
977 // Stokes parameters:
978 if ( stokes_dim != ext_mat.nrows() )
979 throw runtime_error("Row dimension of ext_mat inconsistent with "
980 "column dimension.");
981
982 for ( Index i=0; i<stokes_dim; ++i )
983 {
984 for ( Index j=0; j<stokes_dim; ++j )
985 {
986 // Add the zeeman extinction to extinction matrix.
987 ext_mat(joker,i,j) += ext_mat_zee(joker, i, j);
988 }
989
990 }
991}
992*/
993
994/* Workspace method: Doxygen documentation will be auto-generated */
995/*
996void abs_vecAddGasZeeman( Matrix& abs_vec,
997 const Matrix& abs_vec_zee,
998 const Verbosity&)
999{
1000 // Number of Stokes parameters:
1001 const Index stokes_dim = abs_vec_zee.ncols();
1002 // that there is only one frequency.
1003 for ( Index j=0; j<stokes_dim; ++j )
1004 {
1005 abs_vec(joker,j) += abs_vec_zee(joker,j);
1006 }
1007}
1008*/
1009
1010/* Workspace method: Doxygen documentation will be auto-generated */
1011void pha_matCalc(Tensor4& pha_mat,
1012 const Tensor5& pha_mat_spt,
1013 const Tensor4& pnd_field,
1014 const Index& atmosphere_dim,
1015 const Index& scat_p_index,
1016 const Index& scat_lat_index,
1017 const Index& scat_lon_index,
1018 const Verbosity&) {
1019 Index N_se = pha_mat_spt.nshelves();
1020 Index Nza = pha_mat_spt.nbooks();
1021 Index Naa = pha_mat_spt.npages();
1022 Index stokes_dim = pha_mat_spt.nrows();
1023
1024 pha_mat.resize(Nza, Naa, stokes_dim, stokes_dim);
1025
1026 // Initialisation
1027 pha_mat = 0.0;
1028
1029 Index ilat = 0;
1030 Index ilon = 0;
1031 if (atmosphere_dim > 1) ilat = scat_lat_index;
1032 if (atmosphere_dim > 2) ilon = scat_lon_index;
1033
1034 if (atmosphere_dim == 1) {
1035 // For 1D atmospheres, we additionally integrate the phase matrix over the
1036 // azimuth, because there is no azimuth dependency of the incoming
1037 // field.
1038
1039 Numeric grid_step_size_azimuth = 360. / (Numeric)(Naa - 1) * DEG2RAD;
1040
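// The azimuth integration below is a trapezoidal rule on the regular aa grid
// (step 360/(Naa-1) degrees, converted to radians above), i.e. essentially
//   pha_mat(za,0,i,j) = sum_pt pnd(pt) * sum_aa
//       ( Z_pt(za,aa,i,j) + Z_pt(za,aa+1,i,j) ) / 2 * grid_step_size_azimuth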
1041 // this is a loop over the different scattering elements
1042 for (Index pt_index = 0; pt_index < N_se; ++pt_index)
1043 // these are loops over zenith angle and azimuth angle
1044 for (Index za_index = 0; za_index < Nza; ++za_index)
1045 for (Index aa_index = 0; aa_index < Naa - 1; ++aa_index)
1046 // now the last two loops over the stokes dimension.
1047 for (Index stokes_index_1 = 0; stokes_index_1 < stokes_dim;
1048 ++stokes_index_1)
1049 for (Index stokes_index_2 = 0; stokes_index_2 < stokes_dim;
1050 ++stokes_index_2)
1051 //summation of the product of pnd_field and
1052 //pha_mat_spt.
1053 pha_mat(za_index, 0, stokes_index_1, stokes_index_2) +=
1054 ((pha_mat_spt(pt_index,
1055 za_index,
1056 aa_index,
1057 stokes_index_1,
1058 stokes_index_2) +
1059 pha_mat_spt(pt_index,
1060 za_index,
1061 aa_index + 1,
1062 stokes_index_1,
1063 stokes_index_2)) /
1064 2 * grid_step_size_azimuth *
1065 pnd_field(pt_index, scat_p_index, ilat, ilon));
1066 } else {
1067 // this is a loop over the different scattering elements
1068 for (Index pt_index = 0; pt_index < N_se; ++pt_index)
1069 // these are loops over zenith angle and azimuth angle
1070 for (Index za_index = 0; za_index < Nza; ++za_index)
1071 for (Index aa_index = 0; aa_index < Naa; ++aa_index)
1072 // now the last two loops over the stokes dimension.
1073 for (Index stokes_index_1 = 0; stokes_index_1 < stokes_dim;
1074 ++stokes_index_1)
1075 for (Index stokes_index_2 = 0; stokes_index_2 < stokes_dim;
1076 ++stokes_index_2)
1077 //summation of the product of pnd_field and
1078 //pha_mat_spt.
1079 pha_mat(za_index, aa_index, stokes_index_1, stokes_index_2) +=
1080 (pha_mat_spt(pt_index,
1081 za_index,
1082 aa_index,
1083 stokes_index_1,
1084 stokes_index_2) *
1085 pnd_field(pt_index, scat_p_index, ilat, ilon));
1086 }
1087}
1088
1089/* Workspace method: Doxygen documentation will be auto-generated */
1090void scat_dataCheck( //Input:
1091 const ArrayOfArrayOfSingleScatteringData& scat_data,
1092 const String& check_type,
1093 const Numeric& threshold,
1094 const Verbosity& verbosity) {
1095 CREATE_OUT0;
1096 CREATE_OUT1;
1097 CREATE_OUT2;
1098 //CREATE_OUT3;
1099
1100 // FIXME:
1101 // so far, this works for both scat_data and scat_data_raw. Needs to be
1102 // adapted, though, once we have WSM that can create Z/K/a with different
1103 // f/T dimensions than scat_data_single.f/T_grid.
1104
1105 const Index N_ss = scat_data.nelem();
1106
1107 // 1) any negative values in Z11, K11, or a1? K11>=a1?
1108 // 2) scat_data containing any NaN?
1109 // 3) sca_mat norm sufficiently good (int(Z11)~=K11-a1?)
1110
1111 // Loop over the included scattering species
1112 out2 << " checking for negative values in Z11, K11, and a1, and for K11<a1\n";
1113 for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
1114 const Index N_se = scat_data[i_ss].nelem();
1115
1116 // Loop over the included scattering elements
1117 for (Index i_se = 0; i_se < N_se; i_se++) {
1118 for (Index f = 0; f < F_DATAGRID.nelem(); f++) {
1119 for (Index zai = 0; zai < ABS_VEC_DATA.npages(); zai++)
1120 for (Index aai = 0; aai < ABS_VEC_DATA.nrows(); aai++) {
1121 for (Index t = 0; t < T_DATAGRID.nelem(); t++) {
1122 if (EXT_MAT_DATA(f, t, zai, aai, 0) < 0 ||
1123 ABS_VEC_DATA(f, t, zai, aai, 0) < 0) {
1124 ostringstream os;
1125 os << "Scatt. species #" << i_ss << " element #" << i_se
1126 << " contains negative K11 or a1 at f#" << f << ", T#" << t
1127 << ", za#" << zai << ", aa#" << aai << "\n";
1128 throw runtime_error(os.str());
1129 }
1130 if (EXT_MAT_DATA(f, t, zai, aai, 0) <
1131 ABS_VEC_DATA(f, t, zai, aai, 0)) {
1132 ostringstream os;
1133 os << "Scatt. species #" << i_ss << " element #" << i_se
1134 << " has K11<a1 at f#" << f << ", T#" << t << ", za#" << zai
1135 << ", aa#" << aai << "\n";
1136 throw runtime_error(os.str());
1137 }
1138 }
1139 // Since allowing pha_mat to have a single T entry only (while
1140 // T_grid, ext_mat, abs_vec have more), we need a separate T loop
1141 // for pha_mat
1142 Index nTpha = PHA_MAT_DATA.nvitrines();
1143 for (Index t = 0; t < nTpha; t++) {
1144 for (Index zas = 0; zas < PHA_MAT_DATA.nshelves(); zas++)
1145 for (Index aas = 0; aas < PHA_MAT_DATA.nbooks(); aas++)
1146 if (PHA_MAT_DATA(f, t, zas, aas, zai, aai, 0) < 0) {
1147 ostringstream os;
1148 os << "Scatt. species #" << i_ss << " element #" << i_se
1149 << " contains negative Z11 at f#" << f << ", T#" << t
1150 << " (of " << nTpha << "), za_sca#" << zas << ", aa_sca#"
1151 << aas << ", za_inc#" << zai << ", aa_inc#" << aai
1152 << "\n";
1153 throw runtime_error(os.str());
1154 }
1155 }
1156 }
1157 }
1158 }
1159 }
1160
1161 // Loop over the included scattering species
1162 out2 << " checking for NaN anywhere in Z, K, and a\n";
1163 for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
1164 const Index N_se = scat_data[i_ss].nelem();
1165
1166 // Loop over the included scattering elements
1167 for (Index i_se = 0; i_se < N_se; i_se++) {
1168 for (Index f = 0; f < F_DATAGRID.nelem(); f++) {
1169 for (Index zai = 0; zai < ABS_VEC_DATA.npages(); zai++)
1170 for (Index aai = 0; aai < ABS_VEC_DATA.nrows(); aai++) {
1171 for (Index t = 0; t < T_DATAGRID.nelem(); t++) {
1172 for (Index st = 0; st < ABS_VEC_DATA.ncols(); st++)
1173 if (std::isnan(ABS_VEC_DATA(f, t, zai, aai, st))) {
1174 ostringstream os;
1175 os << "Scatt. species #" << i_ss << " element #" << i_se
1176 << " contains NaN in abs_vec at f#" << f << ", T#" << t
1177 << ", za#" << zai << ", aa#" << aai << ", stokes #" << st
1178 << "\n";
1179 throw runtime_error(os.str());
1180 }
1181 for (Index st = 0; st < EXT_MAT_DATA.ncols(); st++)
1182 if (std::isnan(EXT_MAT_DATA(f, t, zai, aai, st))) {
1183 ostringstream os;
1184 os << "Scatt. species #" << i_ss << " element #" << i_se
1185 << " contains NaN in ext_mat at f#" << f << ", T#" << t
1186 << ", za#" << zai << ", aa#" << aai << ", stokes #" << st
1187 << "\n";
1188 throw runtime_error(os.str());
1189 }
1190 }
1191 Index nTpha = PHA_MAT_DATA.nvitrines();
1192 for (Index t = 0; t < nTpha; t++) {
1193 for (Index zas = 0; zas < PHA_MAT_DATA.nshelves(); zas++)
1194 for (Index aas = 0; aas < PHA_MAT_DATA.nbooks(); aas++)
1195 for (Index st = 0; st < PHA_MAT_DATA.ncols(); st++)
1196 if (std::isnan(
1197 PHA_MAT_DATA(f, t, zas, aas, zai, aai, st))) {
1198 ostringstream os;
1199 os << "Scatt. species #" << i_ss << " element #" << i_se
1200 << " contains NaN in pha_mat at f#" << f << ", T#" << t
1201 << " (of " << nTpha << "), za_sca#" << zas
1202 << ", aa_sca#" << aas << ", za_inc#" << zai
1203 << ", aa_inc#" << aai << ", stokes #" << st
1204 << "\n";
1205 throw runtime_error(os.str());
1206 }
1207 }
1208 }
1209 }
1210 }
1211 }
1212
1213 if (check_type.toupper() == "ALL") {
1214 // Loop over the included scattering species
1215 out2 << " checking normalization of scattering matrix\n";
1216 for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
1217 const Index N_se = scat_data[i_ss].nelem();
1218
1219 // Loop over the included scattering elements
1220 for (Index i_se = 0; i_se < N_se; i_se++)
1221 if (T_DATAGRID.nelem() == PHA_MAT_DATA.nvitrines()) switch (PART_TYPE) {
1222 case PTYPE_TOTAL_RND: {
1223 for (Index f = 0; f < F_DATAGRID.nelem(); f++) {
1224 for (Index t = 0; t < T_DATAGRID.nelem(); t++) {
1225 Numeric Csca = AngIntegrate_trapezoid(
1226 PHA_MAT_DATA(f, t, joker, 0, 0, 0, 0), ZA_DATAGRID);
1227 Numeric Cext_data = EXT_MAT_DATA(f, t, 0, 0, 0);
1228 //Numeric Cabs = Cext_data - Csca;
1229 Numeric Cabs_data = ABS_VEC_DATA(f, t, 0, 0, 0);
1230 Numeric Csca_data = Cext_data - Cabs_data;
1231
1232 /*
1233 out3 << " Coefficients in database: "
1234 << "Cext: " << Cext_data << " Cabs: " << Cabs_data
1235 << " Csca: " << Csca_data << "\n"
1236 << " Calculated coefficients: "
1237 << "Cabs calc: " << Cabs
1238 << " Csca calc: " << Csca << "\n"
1239 << " Deviations "
1240 << "Cabs: " << 1e2*Cabs/Cabs_data-1e2
1241 << "% Csca: " << 1e2*Csca/Csca_data-1e2
1242 << "% Alb: " << (Csca-Csca_data)/Cext_data << "\n";
1243 */
1244
1245 //if (abs(Csca/Csca_data-1.)*Csca_data/Cext_data > threshold)
1246 // below equivalent to the above
1247 // (it's actually the (absolute) albedo deviation!)
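// Worked out: abs(Csca - Csca_data) / Cext_data
//           = abs(Csca/Csca_data - 1) * (Csca_data/Cext_data),
// i.e. the relative error of Csca scaled with the single scattering albedo.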
1248 if (abs(Csca - Csca_data) / Cext_data > threshold) {
1249 ostringstream os;
1250 os << " Deviations in scat_data too large:\n"
1251 << " scat dev [%] " << 1e2 * Csca / Csca_data - 1e2
1252 << " at nominal (actual) albedo of "
1253 << Csca_data / Cext_data << " (" << Csca / Cext_data
1254 << ").\n"
1255 << " Check entry for scattering element " << i_se
1256 << " of scattering species " << i_ss << " at " << f
1257 << ".frequency and " << t << ".temperature!\n";
1258 throw runtime_error(os.str());
1259 }
1260 }
1261 }
1262 break;
1263 }
1264
1265 case PTYPE_AZIMUTH_RND: {
1266 for (Index f = 0; f < F_DATAGRID.nelem(); f++) {
1267 for (Index t = 0; t < T_DATAGRID.nelem(); t++) {
1268 for (Index iza = 0; iza < ABS_VEC_DATA.npages(); iza++) {
1269 Numeric Csca =
1270 AngIntegrate_trapezoid(
1271 PHA_MAT_DATA(f, t, joker, joker, iza, 0, 0),
1272 ZA_DATAGRID,
1273 AA_DATAGRID);
1274 Numeric Cext_data = EXT_MAT_DATA(f, t, iza, 0, 0);
1275 //Numeric Cabs = Cext_data - Csca;
1276 Numeric Cabs_data = ABS_VEC_DATA(f, t, iza, 0, 0);
1277 Numeric Csca_data = Cext_data - Cabs_data;
1278
1279 /*
1280 out3 << " Coefficients in database: "
1281 << "Cext: " << Cext_data << " Cabs: " << Cabs_data
1282 << " Csca: " << Csca_data << "\n"
1283 << " Calculated coefficients: "
1284 << "Cabs calc: " << Cabs
1285 << " Csca calc: " << Csca << "\n"
1286 << " Deviations "
1287 << "Cabs: " << 1e2*Cabs/Cabs_data-1e2
1288 << "% Csca: " << 1e2*Csca/Csca_data-1e2
1289 << "% Alb: " << (Csca-Csca_data)/Cext_data << "\n";
1290 */
1291
1292 //if (abs(Csca/Csca_data-1.)*Csca_data/Cext_data > threshold)
1293 // below equivalent to the above
1294 // (it's actually the (absolute) albedo deviation!)
1295 if (abs(Csca - Csca_data) / Cext_data > threshold) {
1296 ostringstream os;
1297 os << " Deviations in scat_data too large:\n"
1298 << " scat dev [%] " << 1e2 * Csca / Csca_data - 1e2
1299 << " at nominal (actual) albedo of "
1300 << Csca_data / Cext_data << " (" << Csca / Cext_data
1301 << ").\n"
1302 << " Check entry for scattering element " << i_se
1303 << " of scattering species " << i_ss << " at " << f
1304 << ". frequency, " << t << ". temperature, and " << iza
1305 << ". incident polar angle!\n";
1306 throw runtime_error(os.str());
1307 }
1308 }
1309 }
1310 }
1311 break;
1312 }
1313
1314 default: {
1315 out0
1316 << " WARNING:\n"
1317 << " scat_data consistency check not implemented (yet?!) for\n"
1318 << " ptype " << PART_TYPE << "!\n";
1319 }
1320 }
1321 else
1322 out2 << " WARNING:\n"
1323 << " scat_data norm check can not be performed for pha_mat-only"
1324 << " T-reduced scattering elements\n"
1325 << " as found in scatt element #" << i_se
1326 << " of scatt species #" << i_ss << "!\n";
1327 }
1328 } else if (check_type.toupper() == "SANE") {
1329 out1 << " WARNING:\n"
1330 << " Normalization check on pha_mat switched off.\n"
1331 << " Scattering solution might be wrong.\n";
1332 } else {
1333 ostringstream os;
1334 os << "Invalid value for argument *check_type*: '" << check_type << "'.\n";
1335 os << "Valid values are 'all' or 'sane'.";
1336 throw runtime_error(os.str());
1337 }
1338}
1339
1340/* Workspace method: Doxygen documentation will be auto-generated */
1341void DoitScatteringDataPrepare(
1342 Workspace& ws, //Output:
1343 ArrayOfTensor7& pha_mat_sptDOITOpt,
1344 ArrayOfArrayOfSingleScatteringData& scat_data_mono,
1345 Tensor7& pha_mat_doit,
1346 //Output and Input:
1347 Vector& aa_grid,
1348 //Input:
1349 const Index& doit_za_grid_size,
1350 const ArrayOfArrayOfSingleScatteringData& scat_data,
1351 const Index& scat_data_checked,
1352 const Index& f_index,
1353 const Index& atmosphere_dim,
1354 const Index& stokes_dim,
1355 const Tensor3& t_field,
1356 const ArrayOfIndex& cloudbox_limits,
1357 const Tensor4& pnd_field,
1358 const Agenda& pha_mat_spt_agenda,
1359 const Verbosity& verbosity) {
1360 if (scat_data_checked != 1)
1361 throw runtime_error(
1362 "The scattering data must be flagged to have "
1363 "passed a consistency check (scat_data_checked=1).");
1364
1365 // Number of azimuth angles.
1366 const Index Naa = aa_grid.nelem();
1367 Vector grid_stepsize(2);
1368 grid_stepsize[0] = 180. / (Numeric)(doit_za_grid_size - 1);
1369 //grid_stepsize[1] = 360./(Numeric)(Naa - 1);
1370
1371 // Initialize variable *pha_mat_spt*
1372 Tensor5 pha_mat_spt_local(pnd_field.nbooks(),
1373 doit_za_grid_size,
1374 aa_grid.nelem(),
1375 stokes_dim,
1376 stokes_dim,
1377 0.);
1378 Tensor4 pha_mat_local(doit_za_grid_size, Naa, stokes_dim, stokes_dim, 0.);
1379 Tensor6 pha_mat_local_out(cloudbox_limits[1] - cloudbox_limits[0] + 1,
1380 doit_za_grid_size,
1381 doit_za_grid_size,
1382 Naa,
1383 stokes_dim,
1384 stokes_dim,
1385 0.);
1386
1387 // Interpolate all the data in frequency
1388 scat_data_monoExtract(scat_data_mono, scat_data, f_index, verbosity);
1389
1390 // For 1D calculation the scat_aa dimension is not required:
1391 Index N_aa_sca;
1392 if (atmosphere_dim == 1)
1393 N_aa_sca = 1;
1394 else
1395 N_aa_sca = aa_grid.nelem();
1396
1397 Vector za_grid;
1398 nlinspace(za_grid, 0, 180, doit_za_grid_size);
1399
1400 ARTS_ASSERT(scat_data.nelem() == scat_data_mono.nelem());
1401
1402 const Index N_ss = scat_data.nelem();
1403 // FIXME: We need this still as a workspace variable because pha_mat_spt_agenda
1404 // contains a WS method that requires it as input
1405 pha_mat_sptDOITOpt.resize(TotalNumberOfElements(scat_data));
1406
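// Layout note (derived from the resize below): each pha_mat_sptDOITOpt entry
// stores the lab-frame phase matrix of one scattering element on the grids
// [T, za_sca, aa_sca, za_inc, aa_inc, stokes, stokes], so that the DOIT
// iterations later only need a temperature lookup/interpolation.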
1407 Index i_se_flat = 0;
1408 for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
1409 const Index N_se = scat_data[i_ss].nelem();
1410
1411 for (Index i_se = 0; i_se < N_se; i_se++) {
1412 Index N_T = scat_data_mono[i_ss][i_se].T_grid.nelem();
1413 pha_mat_sptDOITOpt[i_se_flat].resize(N_T,
1414 doit_za_grid_size,
1415 N_aa_sca,
1416 doit_za_grid_size,
1417 aa_grid.nelem(),
1418 stokes_dim,
1419 stokes_dim);
1420
1421 // Initialize:
1422 pha_mat_sptDOITOpt[i_se_flat] = 0.;
1423
1424 // Calculate all scattering angles for all combinations of incoming
1425 // and scattered directions and interpolation.
1426 for (Index t_idx = 0; t_idx < N_T; t_idx++) {
1427 // These are the scattered directions as called in *scat_field_calc*
1428 for (Index za_sca_idx = 0; za_sca_idx < doit_za_grid_size;
1429 za_sca_idx++) {
1430 for (Index aa_sca_idx = 0; aa_sca_idx < N_aa_sca; aa_sca_idx++) {
1431 // Integration is performed over all incoming directions
1432 for (Index za_inc_idx = 0; za_inc_idx < doit_za_grid_size;
1433 za_inc_idx++) {
1434 for (Index aa_inc_idx = 0; aa_inc_idx < aa_grid.nelem();
1435 aa_inc_idx++) {
1436 pha_matTransform(
1437 pha_mat_sptDOITOpt[i_se_flat](t_idx,
1438 za_sca_idx,
1439 aa_sca_idx,
1440 za_inc_idx,
1441 aa_inc_idx,
1442 joker,
1443 joker),
1444 scat_data_mono[i_ss][i_se].pha_mat_data(
1445 0, t_idx, joker, joker, joker, joker, joker),
1446 scat_data_mono[i_ss][i_se].za_grid,
1447 scat_data_mono[i_ss][i_se].aa_grid,
1448 scat_data_mono[i_ss][i_se].ptype,
1449 za_sca_idx,
1450 aa_sca_idx,
1451 za_inc_idx,
1452 aa_inc_idx,
1453 za_grid,
1454 aa_grid,
1455 verbosity);
1456 }
1457 }
1458 }
1459 }
1460 }
1461
1462 i_se_flat++;
1463 }
1464 }
1465 // Interpolate phase matrix to current grid
1466 pha_mat_doit.resize(cloudbox_limits[1] - cloudbox_limits[0] + 1,
1467 doit_za_grid_size,
1468 N_aa_sca,
1469 doit_za_grid_size,
1470 Naa,
1471 stokes_dim,
1472 stokes_dim);
1473 pha_mat_doit = 0;
1474
1475 if (atmosphere_dim == 1) {
1476 Index aa_index_local = 0;
1477
1478 // Get pha_mat at the grid positions
1479 // Since atmosphere_dim = 1, there is no loop over lat and lon grids
1480 for (Index p_index = 0; p_index <= cloudbox_limits[1] - cloudbox_limits[0];
1481 p_index++) {
1482 Numeric rtp_temperature_local =
1483 t_field(p_index + cloudbox_limits[0], 0, 0);
1484 //There is only a loop over the zenith angle grid; no azimuth angle grid.
1485 for (Index za_index_local = 0;
1486 za_index_local < doit_za_grid_size;
1487 za_index_local++) {
1488 // Dummy index
1489 Index index_zero = 0;
1490
1491 // Calculate the phase matrix of individual scattering elements
1492 pha_mat_spt_agendaExecute(ws,
1493 pha_mat_spt_local,
1494 za_index_local,
1495 index_zero,
1496 index_zero,
1497 p_index,
1498 aa_index_local,
1499 rtp_temperature_local,
1500 pha_mat_spt_agenda);
1501
1502 // Sum over all scattering elements
1503 pha_matCalc(pha_mat_local,
1504 pha_mat_spt_local,
1505 pnd_field,
1506 atmosphere_dim,
1507 p_index,
1508 0,
1509 0,
1510 verbosity);
1511 pha_mat_doit(
1512 p_index, za_index_local, 0, joker, joker, joker, joker) =
1513 pha_mat_local;
1514 }
1515 }
1516
1517 // Set the incoming azimuth grid to a single zero entry, because there is
1518 // no azimuth dependency for 1D atmospheres.
1519 aa_grid.resize(1);
1520 aa_grid = 0;
1521 }
1522}
1523
1524/* Workspace method: Doxygen documentation will be auto-generated */
1525void scat_dataCalc(ArrayOfArrayOfSingleScatteringData& scat_data,
1526 const ArrayOfArrayOfSingleScatteringData& scat_data_raw,
1527 const Vector& f_grid,
1528 const Index& interp_order,
1529 const Verbosity&)
1530// FIXME: when we allow K, a, Z to be on different f and T grids, their use in
1531// the scatt solvers needs to be reviewed again and adapted to this!
1532{
1533 //Extrapolation factor:
1534 //const Numeric extpolfac = 0.5;
1535
1536 Index nf = f_grid.nelem();
1537
1538 // Check, whether single scattering data contains the right frequencies:
1539 // The check was changed to allow extrapolation at the boundaries of the
1540 // frequency grid.
1541 const String which_interpolation = "scat_data_raw.f_grid to f_grid";
1542 for (Index i_ss = 0; i_ss < scat_data_raw.nelem(); i_ss++) {
1543 for (Index i_se = 0; i_se < scat_data_raw[i_ss].nelem(); i_se++) {
1544 // Check for the special case that ssd.f_grid and f_grid have only one
1545 // element. If identical, that's fine. If not, throw error.
1546 if (scat_data_raw[i_ss][i_se].f_grid.nelem() == 1 && nf == 1)
1547 if (!is_same_within_epsilon(scat_data_raw[i_ss][i_se].f_grid[0],
1548 f_grid[0],
1549 2 * DBL_EPSILON)) {
1550 ostringstream os;
1551 os << "There is a problem with the grids for the following "
1552 << "interpolation:\n"
1553 << which_interpolation << "\n"
1554 << "If original grid has only 1 element, the new grid must also have\n"
1555 << "only a single element and hold the same value as the original grid.";
1556 throw runtime_error(os.str());
1557 }
1558
1559 // check with extrapolation
1560 chk_interpolation_grids(which_interpolation,
1561 scat_data_raw[i_ss][i_se].f_grid,
1562 f_grid,
1563 interp_order);
1564 }
1565 }
1566
1567 //Initialise scat_data
1568 scat_data.resize(scat_data_raw.nelem());
1569
1570 // Loop over the included scattering species
1571 for (Index i_ss = 0; i_ss < scat_data_raw.nelem(); i_ss++) {
1572 const Index N_se = scat_data_raw[i_ss].nelem();
1573
1574 //Initialise scat_data
1575 scat_data[i_ss].resize(N_se);
1576
1577 // Loop over the included scattering elements
1578 for (Index i_se = 0; i_se < N_se; i_se++) {
1579 //Stuff that doesn't need interpolating
1580 PART_TYPE = scat_data_raw[i_ss][i_se].ptype;
1581 F_DATAGRID = f_grid;
1582 T_DATAGRID = scat_data_raw[i_ss][i_se].T_grid;
1583 ZA_DATAGRID = scat_data_raw[i_ss][i_se].za_grid;
1584 AA_DATAGRID = scat_data_raw[i_ss][i_se].aa_grid;
1585
1586 //Sizing SSD data containers
1587 PHA_MAT_DATA.resize(nf,
1588 scat_data_raw[i_ss][i_se].pha_mat_data.nvitrines(),
1589 scat_data_raw[i_ss][i_se].pha_mat_data.nshelves(),
1590 scat_data_raw[i_ss][i_se].pha_mat_data.nbooks(),
1591 scat_data_raw[i_ss][i_se].pha_mat_data.npages(),
1592 scat_data_raw[i_ss][i_se].pha_mat_data.nrows(),
1593 scat_data_raw[i_ss][i_se].pha_mat_data.ncols());
1594 EXT_MAT_DATA.resize(nf,
1595 scat_data_raw[i_ss][i_se].ext_mat_data.nbooks(),
1596 scat_data_raw[i_ss][i_se].ext_mat_data.npages(),
1597 scat_data_raw[i_ss][i_se].ext_mat_data.nrows(),
1598 scat_data_raw[i_ss][i_se].ext_mat_data.ncols());
1599 ABS_VEC_DATA.resize(nf,
1600 scat_data_raw[i_ss][i_se].abs_vec_data.nbooks(),
1601 scat_data_raw[i_ss][i_se].abs_vec_data.npages(),
1602 scat_data_raw[i_ss][i_se].abs_vec_data.nrows(),
1603 scat_data_raw[i_ss][i_se].abs_vec_data.ncols());
1604
1605 const bool single_se_fgrid =
1606 (scat_data_raw[i_ss][i_se].f_grid.nelem() == 1);
1607 if (!single_se_fgrid) {
1608 // Gridpositions:
1609 const auto lag_freq=Interpolation::LagrangeVector(f_grid, scat_data_raw[i_ss][i_se].f_grid, interp_order);
1610 const auto itw = interpweights(lag_freq);
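// lag_freq holds one Lagrange polynomial of order interp_order per entry of
// the new f_grid, set up over the original scat_data_raw frequency grid; the
// corresponding weights itw are reused below for pha_mat, ext_mat and abs_vec.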
1611
1612 //Phase matrix data
1613 for (Index t_index = 0;
1614 t_index < scat_data_raw[i_ss][i_se].pha_mat_data.nvitrines();
1615 t_index++) {
1616 for (Index i_za_sca = 0;
1617 i_za_sca < scat_data_raw[i_ss][i_se].pha_mat_data.nshelves();
1618 i_za_sca++) {
1619 for (Index i_aa_sca = 0;
1620 i_aa_sca < scat_data_raw[i_ss][i_se].pha_mat_data.nbooks();
1621 i_aa_sca++) {
1622 for (Index i_za_inc = 0;
1623 i_za_inc < scat_data_raw[i_ss][i_se].pha_mat_data.npages();
1624 i_za_inc++) {
1625 for (Index i_aa_inc = 0;
1626 i_aa_inc < scat_data_raw[i_ss][i_se].pha_mat_data.nrows();
1627 i_aa_inc++) {
1628 for (Index i = 0;
1629 i < scat_data_raw[i_ss][i_se].pha_mat_data.ncols();
1630 i++) {
1631 reinterp(scat_data[i_ss][i_se].pha_mat_data(joker,
1632 t_index,
1633 i_za_sca,
1634 i_aa_sca,
1635 i_za_inc,
1636 i_aa_inc,
1637 i),
1638 scat_data_raw[i_ss][i_se].pha_mat_data(joker,
1639 t_index,
1640 i_za_sca,
1641 i_aa_sca,
1642 i_za_inc,
1643 i_aa_inc,
1644 i),
1645 itw,
1646 lag_freq);
1647 }
1648 }
1649 }
1650 }
1651 }
1652 }
1653
1654 //Extinction matrix data
1655 for (Index t_index = 0;
1656 t_index < scat_data_raw[i_ss][i_se].ext_mat_data.nbooks();
1657 t_index++) {
1658 for (Index i_za_sca = 0;
1659 i_za_sca < scat_data_raw[i_ss][i_se].ext_mat_data.npages();
1660 i_za_sca++) {
1661 for (Index i_aa_sca = 0;
1662 i_aa_sca < scat_data_raw[i_ss][i_se].ext_mat_data.nrows();
1663 i_aa_sca++) {
1664 for (Index i = 0;
1665 i < scat_data_raw[i_ss][i_se].ext_mat_data.ncols();
1666 i++) {
1667 reinterp(scat_data[i_ss][i_se].ext_mat_data(
1668 joker, t_index, i_za_sca, i_aa_sca, i),
1669 scat_data_raw[i_ss][i_se].ext_mat_data(
1670 joker, t_index, i_za_sca, i_aa_sca, i),
1671 itw,
1672 lag_freq);
1673 }
1674 }
1675 }
1676 }
1677
1678 //Absorption vector data
1679 for (Index t_index = 0;
1680 t_index < scat_data_raw[i_ss][i_se].abs_vec_data.nbooks();
1681 t_index++) {
1682 for (Index i_za_sca = 0;
1683 i_za_sca < scat_data_raw[i_ss][i_se].abs_vec_data.npages();
1684 i_za_sca++) {
1685 for (Index i_aa_sca = 0;
1686 i_aa_sca < scat_data_raw[i_ss][i_se].abs_vec_data.nrows();
1687 i_aa_sca++) {
1688 for (Index i = 0;
1689 i < scat_data_raw[i_ss][i_se].abs_vec_data.ncols();
1690 i++) {
1691 reinterp(scat_data[i_ss][i_se].abs_vec_data(
1692 joker, t_index, i_za_sca, i_aa_sca, i),
1693 scat_data_raw[i_ss][i_se].abs_vec_data(
1694 joker, t_index, i_za_sca, i_aa_sca, i),
1695 itw,
1696 lag_freq);
1697 }
1698 }
1699 }
1700 }
1701 } else {
1702 ARTS_ASSERT(nf == 1);
1703 // we do only have one f_grid value in old and new data (and they have
1704 // been confirmed to be the same), hence only need to copy over/reassign
1705 // the data.
1706 scat_data[i_ss][i_se].pha_mat_data =
1707 scat_data_raw[i_ss][i_se].pha_mat_data;
1708 scat_data[i_ss][i_se].ext_mat_data =
1709 scat_data_raw[i_ss][i_se].ext_mat_data;
1710 scat_data[i_ss][i_se].abs_vec_data =
1711 scat_data_raw[i_ss][i_se].abs_vec_data;
1712 }
1713 }
1714 }
1715}
1716
1717/* Workspace method: Doxygen documentation will be auto-generated */
1718void scat_dataReduceT(ArrayOfArrayOfSingleScatteringData& scat_data,
1719 const Index& i_ss,
1720 const Numeric& T,
1721 const Index& interp_order,
1722 const Index& phamat_only,
1723 const Numeric& threshold,
1724 const Verbosity&) {
1725 // We are directly acting on the scat_data entries, modifying them
1726 // individually. That is, we don't need to resize these arrays. Only the
1727 // pha_mat and probably ext_mat and abs_vec Tensors (in the latter case also
1728 // T_grid!).
1729
1730 // Check that species i_ss exists at all in scat_data
1731 const Index nss = scat_data.nelem();
1732 if (nss <= i_ss) {
1733 ostringstream os;
1734 os << "Can not T-reduce scattering species #" << i_ss << ".\n"
1735 << "*scat_data* contains only " << nss << " scattering species.";
1736 throw runtime_error(os.str());
1737 }
1738
1739 // Loop over the included scattering elements
1740 for (Index i_se = 0; i_se < scat_data[i_ss].nelem(); i_se++) {
1741    // First, check the validity of the scattering element's ptype (so far we
1742    // only handle PTYPE_TOTAL_RND and PTYPE_AZIMUTH_RND).
1743    if (PART_TYPE != PTYPE_TOTAL_RND and PART_TYPE != PTYPE_AZIMUTH_RND) {
1744      ostringstream os;
1745 os << "Only ptypes " << PTYPE_TOTAL_RND << " and " << PTYPE_AZIMUTH_RND
1746 << " can be handled.\n"
1747 << "Scattering element #" << i_se << " has ptype " << PART_TYPE << ".";
1748 throw runtime_error(os.str());
1749 }
1750
1751 // If ssd.T_grid already has only a single point, we do nothing.
1752 // This is not necessarily expected behaviour. BUT, it is in line with
1753 // previous use (that if nT==1, then assume ssd constant in T).
1754 Index nT = T_DATAGRID.nelem();
1755 if (nT > 1) {
1756      // Check that we do not have data that has already been T-reduced (in
1757      // pha_mat only; a complete ssd T-reduction should already have been
1758      // sorted away above).
1759 if (PHA_MAT_DATA.nvitrines() != nT) {
1760 ostringstream os;
1761 os << "Single scattering data of scat element #" << i_se
1762 << " of scat species #" << i_ss << "\n"
1763 << "seems to have undergone some temperature grid manipulation in\n"
1764 << "*pha_mat_data* already. That can not be done twice!";
1765 throw runtime_error(os.str());
1766 }
1767
1768 // Check that ext_mat and abs_vec have the same temp dimensions as T_grid.
1769 // This should always be true, if not it's a bug not a user mistake, hence
1770 // use ARTS_ASSERT.
1771 ARTS_ASSERT(EXT_MAT_DATA.nbooks() == nT and ABS_VEC_DATA.nbooks() == nT);
1772
1773 // Check that T_grid is consistent with requested interpolation order
1774 ostringstream ost;
1775 ost << "Scattering data temperature interpolation for\n"
1776 << "scat element #" << i_se << " of scat species #" << i_ss << ".";
1777 chk_interpolation_grids(ost.str(), T_DATAGRID, T, interp_order);
1778
1779 // Gridpositions:
1780 const LagrangeInterpolation lag_T(0, T, T_DATAGRID, interp_order);
1781 const auto itw = interpweights(lag_T);
1782
1783 //Sizing of temporary SSD data containers
1784 Tensor7 phamat_tmp(PHA_MAT_DATA.nlibraries(),
1785 1,
1786 PHA_MAT_DATA.nshelves(),
1787 PHA_MAT_DATA.nbooks(),
1788 PHA_MAT_DATA.npages(),
1789 PHA_MAT_DATA.nrows(),
1790 PHA_MAT_DATA.ncols(),
1791 0.);
1792 Tensor5 extmat_tmp(EXT_MAT_DATA.nshelves(),
1793 1,
1794 EXT_MAT_DATA.npages(),
1795 EXT_MAT_DATA.nrows(),
1796 EXT_MAT_DATA.ncols(),
1797 0.);
1798 Tensor5 absvec_tmp(ABS_VEC_DATA.nshelves(),
1799 1,
1800 ABS_VEC_DATA.npages(),
1801 ABS_VEC_DATA.nrows(),
1802 ABS_VEC_DATA.ncols(),
1803 0.);
1804
1805 // a1) temp interpol of pha mat
1806 //We have to apply the interpolation separately for each of the pha_mat
1807 //entries, i.e. loop over all remaining size dimensions
1808 //We don't apply any transformation here, but interpolate the actual
1809 //stored ssd (i.e. not the 4x4matrices, but the 7-16 elements separately).
1810 for (Index i_f = 0; i_f < PHA_MAT_DATA.nlibraries(); i_f++)
1811 for (Index i_za1 = 0; i_za1 < PHA_MAT_DATA.nshelves(); i_za1++)
1812 for (Index i_aa1 = 0; i_aa1 < PHA_MAT_DATA.nbooks(); i_aa1++)
1813 for (Index i_za2 = 0; i_za2 < PHA_MAT_DATA.npages(); i_za2++)
1814 for (Index i_aa2 = 0; i_aa2 < PHA_MAT_DATA.nrows(); i_aa2++)
1815 for (Index i_st = 0; i_st < PHA_MAT_DATA.ncols(); i_st++)
1816                  phamat_tmp(i_f, 0, i_za1, i_aa1, i_za2, i_aa2, i_st) =
1817                      interp(PHA_MAT_DATA(
1818                                 i_f, joker, i_za1, i_aa1, i_za2, i_aa2, i_st),
1819 itw,
1820 lag_T);
1821
1822 // a2) temp interpol of ext and abs.
1823 //We do that regardless of whether they should be reduced or not, because
1824 //we need them also for norm checking / renorming.
1825 for (Index i_f = 0; i_f < EXT_MAT_DATA.nshelves(); i_f++)
1826 for (Index i_za = 0; i_za < EXT_MAT_DATA.npages(); i_za++)
1827 for (Index i_aa = 0; i_aa < EXT_MAT_DATA.nrows(); i_aa++) {
1828 for (Index i_st = 0; i_st < EXT_MAT_DATA.ncols(); i_st++)
1829 extmat_tmp(i_f, 0, i_za, i_aa, i_st) =
1830 interp(EXT_MAT_DATA(i_f, joker, i_za, i_aa, i_st), itw, lag_T);
1831 for (Index i_st = 0; i_st < ABS_VEC_DATA.ncols(); i_st++)
1832 absvec_tmp(i_f, 0, i_za, i_aa, i_st) =
1833 interp(ABS_VEC_DATA(i_f, joker, i_za, i_aa, i_st), itw, lag_T);
1834 }
1835
1836 // Norm & other consistency checks.
1837 // All done separately for PTYPE_TOTAL_RND and PTYPE_AZIMUTH_RND (the
1838 // latter needs to loop over scat_za_inc).
1839 //
1840 // b) calculate norm of T-reduced pha mat
1841 // c) check pha mat norm vs. sca xs from ext-abs at T_interpol
1842 // d) Ensure that T-reduced data is consistent/representative of all data.
1843 // and throw error/disallow reduction if sca xs varying too much.
1844 // d1) in case of pha_mat only reduction, the scat xs (pha_mat norm) needs
1845 // to be consistent with the sca xs from over the ext/abs T_grid. This is
1846 // essentially an energy conservation issue. That is, we should be as
1847 // strict here as with pha_mat norm deviations in general (however, should
1848 // we allow the norm at T_grid to deviate by a threshold from the
1849 // T_interpol norm (that should be a little looser) or from the ext-abs
1850 // derived expected norm?).
1851 // d2) in case of all-ssp reduction, the data should still be
1852 // representative. b)&c) ensure data consistency in itself, making this
1853 // rather an error on the SSP as such. Hence, we might be a little more
1854 // loose here.
1855 // d) the resulting check for d1) and d2) is the same (ext-abs sca xs at
1856 // T_interpol vs ext-abs sca xs at T_grid), but we use different
1857 // thresholds.
1858 //
1859 // FIXME?
1860 // Regarding b)&c) should we also calc norm of original-T pha mats? To get
1861 // a measure how strong they already deviate from expected norm (As we can
1862 // not be more exact here than what is already in the original data...).
1863 // On the other hand, a certain accuracy should be guaranteed from
1864 // scat_dataCheck already.
1865 // Hence, for now we skip that (but maybe added later when proves
1866 // necessary).
1867 //
1868 // FIXME?
1869 // Regarding d1), we could alternatively make sure here that norm at
1870 // T_interpol is good. And later on ignore any deviations between norm and
1871 // ext-abs sca xs and instead blindly renorm to expected norm (would that
1872 // be ok? correct norm here, doesn't imply correct norm at whatever scat
1873 // angle grids the user is applying. for that, we could in place also calc
1874 // the original-data norm. but that might be expensive (as we can't do
1875 // that from ext-abs sca xs, because we don't know to which T that refers.
1876 // that would go away if we'd actually store pha_mat normed to 1 or 4Pi.
1877 // but that's prob not going to happen. is it? Another option would be to
1878 // introduce an additional T_grid, eg T_grid_phamat.). which we actually
1879 // want to avoid :-/
1880
1881 Numeric this_threshold;
1882 String errmsg;
1883 if (phamat_only) {
1884 this_threshold = threshold;
1885 errmsg =
1886 "T-reduced *pha_mat_data* norm (=sca xs) deviates too "
1887 "much from non-reduced *ext_mat_data* and *abs_vec_data*:";
1888 } else {
1889 this_threshold = 2 * threshold;
1890 errmsg =
1891 "T-reduced *scat_data* deviates too much from original "
1892 "*scat_data*:";
1893 }
1894
1895      // The norm-check code is copied and slightly adapted from scat_dataCheck.
1896      // It might be better to make a function out of this and use it in both
1897      // places for consistency.
1898      //
1899      // FIXME: no checks on higher Stokes elements are done. Should there be?
1900      // Which ones?
1901 switch (PART_TYPE) {
1902 case PTYPE_TOTAL_RND: {
1903 for (Index f = 0; f < F_DATAGRID.nelem(); f++) {
1904            // b) calculate norm of T-reduced pha mat
1905            Numeric Csca = AngIntegrate_trapezoid(
1906                phamat_tmp(f, 0, joker, 0, 0, 0, 0), ZA_DATAGRID);
1907 Numeric Cext_data = extmat_tmp(f, 0, 0, 0, 0);
1908 //Numeric Cabs = Cext_data - Csca;
1909 Numeric Cabs_data = absvec_tmp(f, 0, 0, 0, 0);
1910 Numeric Csca_data = Cext_data - Cabs_data;
1911
1912 /*
1913 cout << " Coefficients in data: "
1914 << "Cext: " << Cext_data << " Cabs: " << Cabs_data
1915 << " Csca: " << Csca_data << "\n"
1916 << " Calculated coefficients: "
1917 << "Cabs calc: " << Cabs
1918 << " Csca calc: " << Csca << "\n"
1919 << " Deviations "
1920 << "Cabs: " << 1e2*Cabs/Cabs_data-1e2
1921 << "% Csca: " << 1e2*Csca/Csca_data-1e2
1922 << "% Alb: " << (Csca-Csca_data)/Cext_data << "\n";
1923 */
1924
1925 // c) check pha mat norm vs. sca xs from ext-abs at T_interpol (as
1926 // albedo dev check)
1927 if (abs(Csca - Csca_data) / Cext_data > threshold) {
1928 ostringstream os;
1929 os << " Deviations in T-reduced scat_data too large:\n"
1930 << " scat dev [%] " << 1e2 * Csca / Csca_data - 1e2
1931 << " at nominal (actual) albedo of " << Csca_data / Cext_data
1932 << " (" << Csca / Cext_data << ").\n"
1933 << " Problem occurs for scattering element #" << i_se
1934 << " at " << f << ".frequency!\n";
1935 throw runtime_error(os.str());
1936 }
1937            Numeric norm_dev = (Csca - Csca_data) / Cext_data;
1938
1939 // d) Ensure that T-reduced data is consistent/representative of all data.
1940 // below use theoretical (ext-abs derived) sca xs as reference.
1941 Csca = Csca_data;
1942 for (Index t = 0; t < T_DATAGRID.nelem(); t++) {
1943 Cext_data = EXT_MAT_DATA(f, t, 0, 0, 0);
1944 Csca_data = Cext_data - ABS_VEC_DATA(f, t, 0, 0, 0);
1945 Numeric xs_dev = (Csca - Csca_data) / Cext_data;
1946 if (abs(norm_dev + (Csca - Csca_data) / Cext_data) >
1947 this_threshold)
1948 cout << "Accumulated deviation (abs(" << norm_dev << "+"
1949 << xs_dev << ")=" << abs(norm_dev + xs_dev)
1950 << " exceeding threshold (" << this_threshold << ").\n";
1951 if (abs(Csca - Csca_data) / Cext_data > this_threshold) {
1952 ostringstream os;
1953 os << " " << errmsg << "\n"
1954 << " scat dev [%] " << 1e2 * Csca / Csca_data - 1e2
1955 << " at nominal (actual) albedo of " << Csca_data / Cext_data
1956 << " (" << Csca / Cext_data << ").\n"
1957 << " Problem occurs for scattering element #" << i_se
1958 << " at " << f << ".frequency and " << t
1959 << ".temperature!\n";
1960 throw runtime_error(os.str());
1961 }
1962 }
1963 }
1964 break;
1965 }
1966
1967 case PTYPE_AZIMUTH_RND: {
1968 for (Index f = 0; f < F_DATAGRID.nelem(); f++) {
1969 for (Index iza = 0; iza < ABS_VEC_DATA.npages(); iza++) {
1970              // b) calculate norm of T-reduced pha mat
1971              Numeric Csca = AngIntegrate_trapezoid(
1972                  phamat_tmp(f, 0, joker, joker, iza, 0, 0),
1973                  ZA_DATAGRID,
1974                  AA_DATAGRID);
1975 Numeric Cext_data = extmat_tmp(f, 0, iza, 0, 0);
1976 //Numeric Cabs = Cext_data - Csca;
1977 Numeric Cabs_data = absvec_tmp(f, 0, iza, 0, 0);
1978 Numeric Csca_data = Cext_data - Cabs_data;
1979
1980 /*
1981 cout << " Coefficients in data: "
1982 << "Cext: " << Cext_data << " Cabs: " << Cabs_data
1983 << " Csca: " << Csca_data << "\n"
1984 << " Calculated coefficients: "
1985 << "Cabs calc: " << Cabs
1986 << " Csca calc: " << Csca << "\n"
1987 << " Deviations "
1988 << "Cabs: " << 1e2*Cabs/Cabs_data-1e2
1989 << "% Csca: " << 1e2*Csca/Csca_data-1e2
1990 << "% Alb: " << (Csca-Csca_data)/Cext_data << "\n";
1991 */
1992
1993 // c) check pha mat norm vs. sca xs from ext-abs at T_interpol (as
1994 // albedo dev check)
1995 if (abs(Csca - Csca_data) / Cext_data > threshold) {
1996 ostringstream os;
1997 os << " Deviations in T-reduced scat_data too large:\n"
1998 << " scat dev [%] " << 1e2 * Csca / Csca_data - 1e2
1999 << " at nominal (actual) albedo of " << Csca_data / Cext_data
2000 << " (" << Csca / Cext_data << ").\n"
2001 << " Problem occurs for scattering element #" << i_se
2002 << " at " << f << ".frequency, and " << iza
2003 << ". incident polar angle!\n";
2004 throw runtime_error(os.str());
2005 }
2006
2007 // d) Ensure that T-reduced data is consistent/representative of all data.
2008 // below use theoretical (ext-abs derived) sca xs as reference.
2009 Csca = Csca_data;
2010 for (Index t = 0; t < T_DATAGRID.nelem(); t++) {
2011 Cext_data = EXT_MAT_DATA(f, t, 0, 0, 0);
2012 Csca_data = Cext_data - ABS_VEC_DATA(f, t, 0, 0, 0);
2013 if (abs(Csca - Csca_data) / Cext_data > this_threshold) {
2014 ostringstream os;
2015 os << " " << errmsg << "\n"
2016 << " scat dev [%] " << 1e2 * Csca / Csca_data - 1e2
2017 << " at nominal (actual) albedo of "
2018 << Csca_data / Cext_data << " (" << Csca / Cext_data
2019 << ").\n"
2020 << " Problem occurs for scattering element #" << i_se
2021 << " at " << f << ".frequency and " << t
2022 << ".temperature, and " << iza
2023 << ". incident polar angle!\n";
2024 throw runtime_error(os.str());
2025 }
2026 }
2027 }
2028 }
2029 break;
2030 }
2031
2032 default: {
2033 // other ptype cases already excluded above. i.e. we shouldn't end up
2034 // here. If we do, that's a bug.
2035 ARTS_ASSERT(0);
2036 }
2037 }
2038
2039 PHA_MAT_DATA = phamat_tmp;
2040 //We don't need to reset the scat element's grids!
2041 //Except for T_grid in the case that we reduce ALL three ssd variables.
2042 if (!phamat_only) {
2043 T_DATAGRID.resize(1);
2044 T_DATAGRID = T;
2045 EXT_MAT_DATA = extmat_tmp;
2046 ABS_VEC_DATA = absvec_tmp;
2047 }
2048 }
2049 }
2050}
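
// Editor's note: a minimal, hypothetical sketch (not part of ARTS) of the
// consistency criterion applied by scat_dataReduceT above: the scattering
// cross section implied by the (T-reduced) phase matrix norm must agree with
// the one derived from extinction minus absorption, measured relative to the
// extinction (i.e. as an albedo deviation) and compared to the threshold.
static bool demo_sca_xs_consistent(const Numeric& Csca_from_norm,
                                   const Numeric& Cext,
                                   const Numeric& Cabs,
                                   const Numeric& threshold) {
  const Numeric Csca_from_data = Cext - Cabs;
  return abs(Csca_from_norm - Csca_from_data) / Cext <= threshold;
}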
2051
2052/* Workspace method: Doxygen documentation will be auto-generated */
2053void scat_data_monoCalc(ArrayOfArrayOfSingleScatteringData& scat_data_mono,
2054                        const ArrayOfArrayOfSingleScatteringData& scat_data,
2055 const Vector& f_grid,
2056 const Index& f_index,
2057 const Verbosity&) {
2058 //Extrapolation factor:
2059 //const Numeric extpolfac = 0.5;
2060
2061  // Check whether the single scattering data contains the right frequencies:
2062 // The check was changed to allow extrapolation at the boundaries of the
2063 // frequency grid.
2064 for (Index h = 0; h < scat_data.nelem(); h++) {
2065 for (Index i = 0; i < scat_data[h].nelem(); i++) {
2066 // check with extrapolation
2067 chk_interpolation_grids("scat_data.f_grid to f_grid",
2068 scat_data[h][i].f_grid,
2069 f_grid[f_index]);
2070 }
2071 }
2072
2073 //Initialise scat_data_mono
2074 scat_data_mono.resize(scat_data.nelem());
2075
2076 // Loop over the included scattering species
2077 for (Index i_ss = 0; i_ss < scat_data.nelem(); i_ss++) {
2078 const Index N_se = scat_data[i_ss].nelem();
2079
2080 //Initialise scat_data_mono
2081 scat_data_mono[i_ss].resize(N_se);
2082
2083 // Loop over the included scattering elements
2084 for (Index i_se = 0; i_se < N_se; i_se++) {
2085 // Gridpositions:
2086 GridPos freq_gp;
2087 gridpos(freq_gp, F_DATAGRID, f_grid[f_index]);
2088
2089 // Interpolation weights:
2090 Vector itw(2);
2091 interpweights(itw, freq_gp);
2092
2093 //Stuff that doesn't need interpolating
2094 scat_data_mono[i_ss][i_se].ptype = PART_TYPE;
2095 scat_data_mono[i_ss][i_se].f_grid.resize(1);
2096 scat_data_mono[i_ss][i_se].f_grid = f_grid[f_index];
2097 scat_data_mono[i_ss][i_se].T_grid = scat_data[i_ss][i_se].T_grid;
2098 scat_data_mono[i_ss][i_se].za_grid = ZA_DATAGRID;
2099 scat_data_mono[i_ss][i_se].aa_grid = AA_DATAGRID;
2100
2101 //Phase matrix data
2102 scat_data_mono[i_ss][i_se].pha_mat_data.resize(1,
2103 PHA_MAT_DATA.nvitrines(),
2104 PHA_MAT_DATA.nshelves(),
2105 PHA_MAT_DATA.nbooks(),
2106 PHA_MAT_DATA.npages(),
2107 PHA_MAT_DATA.nrows(),
2108 PHA_MAT_DATA.ncols());
2109
2110 for (Index t_index = 0; t_index < PHA_MAT_DATA.nvitrines(); t_index++) {
2111 for (Index i_za_sca = 0; i_za_sca < PHA_MAT_DATA.nshelves();
2112 i_za_sca++) {
2113 for (Index i_aa_sca = 0; i_aa_sca < PHA_MAT_DATA.nbooks();
2114 i_aa_sca++) {
2115 for (Index i_za_inc = 0; i_za_inc < PHA_MAT_DATA.npages();
2116 i_za_inc++) {
2117 for (Index i_aa_inc = 0; i_aa_inc < PHA_MAT_DATA.nrows();
2118 i_aa_inc++) {
2119 for (Index i = 0; i < PHA_MAT_DATA.ncols(); i++) {
2120 scat_data_mono[i_ss][i_se].pha_mat_data(
2121 0, t_index, i_za_sca, i_aa_sca, i_za_inc, i_aa_inc, i) =
2122 interp(itw,
2123                               PHA_MAT_DATA(joker,
2124                                            t_index,
2125 i_za_sca,
2126 i_aa_sca,
2127 i_za_inc,
2128 i_aa_inc,
2129 i),
2130 freq_gp);
2131 }
2132 }
2133 }
2134 }
2135 }
2136 //Extinction matrix data
2137 scat_data_mono[i_ss][i_se].ext_mat_data.resize(1,
2138 T_DATAGRID.nelem(),
2139 EXT_MAT_DATA.npages(),
2140 EXT_MAT_DATA.nrows(),
2141 EXT_MAT_DATA.ncols());
2142 for (Index i_za_sca = 0; i_za_sca < EXT_MAT_DATA.npages(); i_za_sca++) {
2143 for (Index i_aa_sca = 0; i_aa_sca < EXT_MAT_DATA.nrows();
2144 i_aa_sca++) {
2145 //
2146 // Interpolation of extinction matrix:
2147 //
2148 for (Index i = 0; i < EXT_MAT_DATA.ncols(); i++) {
2149 scat_data_mono[i_ss][i_se].ext_mat_data(
2150 0, t_index, i_za_sca, i_aa_sca, i) =
2151 interp(itw,
2152 EXT_MAT_DATA(joker, t_index, i_za_sca, i_aa_sca, i),
2153 freq_gp);
2154 }
2155 }
2156 }
2157 //Absorption vector data
2158 scat_data_mono[i_ss][i_se].abs_vec_data.resize(1,
2159 T_DATAGRID.nelem(),
2160 ABS_VEC_DATA.npages(),
2161 ABS_VEC_DATA.nrows(),
2162 ABS_VEC_DATA.ncols());
2163 for (Index i_za_sca = 0; i_za_sca < ABS_VEC_DATA.npages(); i_za_sca++) {
2164 for (Index i_aa_sca = 0; i_aa_sca < ABS_VEC_DATA.nrows();
2165 i_aa_sca++) {
2166 //
2167 // Interpolation of absorption vector:
2168 //
2169 for (Index i = 0; i < ABS_VEC_DATA.ncols(); i++) {
2170 scat_data_mono[i_ss][i_se].abs_vec_data(
2171 0, t_index, i_za_sca, i_aa_sca, i) =
2172 interp(itw,
2173 ABS_VEC_DATA(joker, t_index, i_za_sca, i_aa_sca, i),
2174 freq_gp);
2175 }
2176 }
2177 }
2178 }
2179 }
2180 }
2181}
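
// Editor's note: a minimal, hypothetical sketch (not part of ARTS) of the
// classic ARTS grid-position interpolation used for the frequency
// interpolation above: locate the target in the old grid, compute the two
// linear weights, evaluate. All names below are illustrative only.
static Numeric demo_gridpos_interp(const Vector& old_grid,
                                   ConstVectorView old_values,
                                   const Numeric& x_new) {
  GridPos gp;
  gridpos(gp, old_grid, x_new);  // grid position of x_new in old_grid
  Vector itw(2);
  interpweights(itw, gp);        // linear interpolation weights
  return interp(itw, old_values, gp);
}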
2182
2183/* Workspace method: Doxygen documentation will be auto-generated */
2184void scat_data_monoExtract(ArrayOfArrayOfSingleScatteringData& scat_data_mono,
2185                           const ArrayOfArrayOfSingleScatteringData& scat_data,
2186 const Index& f_index,
2187 const Verbosity&) {
2188 //Initialise scat_data_mono
2189 scat_data_mono.resize(scat_data.nelem());
2190
2191 // Loop over the included scattering species
2192 for (Index i_ss = 0; i_ss < scat_data.nelem(); i_ss++) {
2193 const Index N_se = scat_data[i_ss].nelem();
2194
2195 //Initialise scat_data_mono
2196 scat_data_mono[i_ss].resize(N_se);
2197
2198 // Loop over the included scattering elements
2199 for (Index i_se = 0; i_se < N_se; i_se++) {
2200 Index nf = F_DATAGRID.nelem();
2201 if (nf == 1) {
2202 scat_data_mono[i_ss][i_se] = scat_data[i_ss][i_se];
2203 } else {
2204 //Stuff that doesn't need interpolating
2205 scat_data_mono[i_ss][i_se].ptype = PART_TYPE;
2206 scat_data_mono[i_ss][i_se].T_grid = T_DATAGRID;
2207 scat_data_mono[i_ss][i_se].za_grid = ZA_DATAGRID;
2208 scat_data_mono[i_ss][i_se].aa_grid = AA_DATAGRID;
2209
2210 scat_data_mono[i_ss][i_se].f_grid.resize(1);
2211 scat_data_mono[i_ss][i_se].f_grid = F_DATAGRID[f_index];
2212
2213 Index this_f_index;
2214
2215 //Phase matrix data
2216 /*scat_data_mono[i_ss][i_se].pha_mat_data.resize(1,
2217 PHA_MAT_DATA.nvitrines(),
2218 PHA_MAT_DATA.nshelves(),
2219 PHA_MAT_DATA.nbooks(),
2220 PHA_MAT_DATA.npages(),
2221 PHA_MAT_DATA.nrows(),
2222 PHA_MAT_DATA.ncols());*/
2223 this_f_index = (PHA_MAT_DATA.nlibraries() == 1) ? 0 : f_index;
2224 scat_data_mono[i_ss][i_se].pha_mat_data = PHA_MAT_DATA(
2225 Range(this_f_index, 1), joker, joker, joker, joker, joker, joker);
2226
2227 //Extinction matrix data
2228 /*scat_data_mono[i_ss][i_se].ext_mat_data.resize(1, T_DATAGRID.nelem(),
2229 EXT_MAT_DATA.npages(),
2230 EXT_MAT_DATA.nrows(),
2231 EXT_MAT_DATA.ncols());*/
2232 this_f_index = (EXT_MAT_DATA.nshelves() == 1) ? 0 : f_index;
2233 scat_data_mono[i_ss][i_se].ext_mat_data =
2234 EXT_MAT_DATA(Range(this_f_index, 1), joker, joker, joker, joker);
2235
2236 //Absorption vector data
2237 /*scat_data_mono[i_ss][i_se].abs_vec_data.resize(1, T_DATAGRID.nelem(),
2238 ABS_VEC_DATA.npages(),
2239 ABS_VEC_DATA.nrows(),
2240 ABS_VEC_DATA.ncols());*/
2241 this_f_index = (ABS_VEC_DATA.nshelves() == 1) ? 0 : f_index;
2242 scat_data_mono[i_ss][i_se].abs_vec_data =
2243 ABS_VEC_DATA(Range(this_f_index, 1), joker, joker, joker, joker);
2244 }
2245 }
2246 }
2247}
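
// Editor's note: a minimal, hypothetical sketch (not part of ARTS) of the
// frequency-slice extraction performed by scat_data_monoExtract above. If the
// stored data already holds a single frequency it is taken as is (index 0),
// otherwise the requested f_index is selected; either way the frequency
// dimension of the result has length 1.
static void demo_extract_mono_slice(Tensor7& pha_mat_mono,
                                    const Tensor7& pha_mat_data,
                                    const Index& f_index) {
  const Index this_f_index = (pha_mat_data.nlibraries() == 1) ? 0 : f_index;
  pha_mat_mono = pha_mat_data(
      Range(this_f_index, 1), joker, joker, joker, joker, joker, joker);
}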
2248
2249/* Workspace method: Doxygen documentation will be auto-generated */
2250void opt_prop_sptFromMonoData( // Output and Input:
2251 ArrayOfPropagationMatrix& ext_mat_spt,
2252 ArrayOfStokesVector& abs_vec_spt,
2253 // Input:
2254 const ArrayOfArrayOfSingleScatteringData& scat_data_mono,
2255 const Vector& za_grid,
2256 const Vector& aa_grid,
2257 const Index& za_index, // propagation directions
2258 const Index& aa_index,
2259 const Numeric& rtp_temperature,
2260 const Tensor4& pnd_field,
2261 const Index& scat_p_index,
2262 const Index& scat_lat_index,
2263 const Index& scat_lon_index,
2264 const Verbosity& verbosity) {
2265 DEBUG_ONLY(const Index N_se_total = TotalNumberOfElements(scat_data_mono);)
2266 const Index stokes_dim = ext_mat_spt[0].StokesDimensions();
2267 const Numeric za_sca = za_grid[za_index];
2268 const Numeric aa_sca = aa_grid[aa_index];
2269
2270 if (stokes_dim > 4 or stokes_dim < 1) {
2271 throw runtime_error(
2272 "The dimension of the stokes vector \n"
2273 "must be 1,2,3 or 4");
2274 }
2275
2276 ARTS_ASSERT(ext_mat_spt.nelem() == N_se_total);
2277 ARTS_ASSERT(abs_vec_spt.nelem() == N_se_total);
2278
2279 // Check that we do indeed have scat_data_mono here. Only checking the first
2280 // scat element, assuming the other elements have been processed in the same
2281  // manner. That's safe against having scat_data here if that originated from
2282  // scat_data_raw reading routines (ScatSpecies/Element*Add/Read), but it's not safe
2283 // against data read by ReadXML directly or if scat_data(_raw) has been (partly)
2284 // produced from scat_data_singleTmatrix. That would be too costly here,
2285 // though.
2286 // Also, we can't check here whether data is at the correct frequency since we
2287 // don't know f_grid and f_index here (we could pass it in, though).
2288 if (scat_data_mono[0][0].f_grid.nelem() > 1) {
2289 ostringstream os;
2290 os << "Scattering data seems to be *scat_data* (several freq points),\n"
2291 << "but *scat_data_mono* (1 freq point only) is expected here.";
2292 throw runtime_error(os.str());
2293 }
2294
2295 // Initialisation
2296 for (auto& pm : ext_mat_spt) pm.SetZero();
2297 for (auto& av : abs_vec_spt) av.SetZero();
2298
2299 GridPos t_gp;
2300
2301 Vector itw(2);
2302
2303 Index i_se_flat = 0;
2304  // Loop over the included scattering species
2305 for (Index i_ss = 0; i_ss < scat_data_mono.nelem(); i_ss++) {
2306 // Loop over the included scattering elements
2307 for (Index i_se = 0; i_se < scat_data_mono[i_ss].nelem(); i_se++) {
2308 // If the particle number density at a specific point in the
2309 // atmosphere for the i_se scattering element is zero, we don't need
2310 // to do the transformation!
2311 if (pnd_field(i_se_flat, scat_p_index, scat_lat_index, scat_lon_index) >
2312 PND_LIMIT) {
2313 // First we have to transform the data from the coordinate system
2314 // used in the database (depending on the kind of ptype) to the
2315 // laboratory coordinate system.
2316
2317 //
2318 // Do the transformation into the laboratory coordinate system.
2319 //
2320 // Extinction matrix:
2321 //
2322 Index ext_npages = scat_data_mono[i_ss][i_se].ext_mat_data.npages();
2323 Index ext_nrows = scat_data_mono[i_ss][i_se].ext_mat_data.nrows();
2324 Index ext_ncols = scat_data_mono[i_ss][i_se].ext_mat_data.ncols();
2325 Index abs_npages = scat_data_mono[i_ss][i_se].abs_vec_data.npages();
2326 Index abs_nrows = scat_data_mono[i_ss][i_se].abs_vec_data.nrows();
2327 Index abs_ncols = scat_data_mono[i_ss][i_se].abs_vec_data.ncols();
2328
2329 //Check that scattering data temperature range covers required temperature
2330 ConstVectorView t_grid = scat_data_mono[i_ss][i_se].T_grid;
2331
2332 if (t_grid.nelem() > 1) {
2333 ostringstream os;
2334 os << "In opt_prop_sptFromMonoData.\n"
2335 << "The temperature grid of the scattering data does not\n"
2336 << "cover the atmospheric temperature at cloud location.\n"
2337 << "The data should include the value T = " << rtp_temperature
2338 << " K.";
2339 chk_interpolation_grids(os.str(), t_grid, rtp_temperature);
2340
2341 //interpolate over temperature
2342 Tensor3 ext_mat_data1temp(ext_npages, ext_nrows, ext_ncols);
2343 gridpos(t_gp, t_grid, rtp_temperature);
2344 interpweights(itw, t_gp);
2345 for (Index i_p = 0; i_p < ext_npages; i_p++) {
2346 for (Index i_r = 0; i_r < ext_nrows; i_r++) {
2347 for (Index i_c = 0; i_c < ext_ncols; i_c++) {
2348 ext_mat_data1temp(i_p, i_r, i_c) =
2349 interp(itw,
2350 scat_data_mono[i_ss][i_se].ext_mat_data(
2351 0, joker, i_p, i_r, i_c),
2352 t_gp);
2353 }
2354 }
2355 }
2356 ext_matTransform(ext_mat_spt[i_se_flat],
2357 ext_mat_data1temp,
2358 scat_data_mono[i_ss][i_se].za_grid,
2359 scat_data_mono[i_ss][i_se].aa_grid,
2360 scat_data_mono[i_ss][i_se].ptype,
2361 za_sca,
2362 aa_sca,
2363 verbosity);
2364 } else {
2365 ext_matTransform(ext_mat_spt[i_se_flat],
2366 scat_data_mono[i_ss][i_se].ext_mat_data(
2367 0, 0, joker, joker, joker),
2368 scat_data_mono[i_ss][i_se].za_grid,
2369 scat_data_mono[i_ss][i_se].aa_grid,
2370 scat_data_mono[i_ss][i_se].ptype,
2371 za_sca,
2372 aa_sca,
2373 verbosity);
2374 }
2375 //
2376 // Absorption vector:
2377 //
2378
2379 if (t_grid.nelem() > 1) {
2380 Tensor3 abs_vec_data1temp(abs_npages, abs_nrows, abs_ncols);
2381 //interpolate over temperature
2382 for (Index i_p = 0; i_p < abs_npages; i_p++) {
2383 for (Index i_r = 0; i_r < abs_nrows; i_r++) {
2384 for (Index i_c = 0; i_c < abs_ncols; i_c++) {
2385 abs_vec_data1temp(i_p, i_r, i_c) =
2386 interp(itw,
2387 scat_data_mono[i_ss][i_se].abs_vec_data(
2388 0, joker, i_p, i_r, i_c),
2389 t_gp);
2390 }
2391 }
2392 }
2393 abs_vecTransform(abs_vec_spt[i_se_flat],
2394 abs_vec_data1temp,
2395 scat_data_mono[i_ss][i_se].za_grid,
2396 scat_data_mono[i_ss][i_se].aa_grid,
2397 scat_data_mono[i_ss][i_se].ptype,
2398 za_sca,
2399 aa_sca,
2400 verbosity);
2401 } else {
2402 abs_vecTransform(abs_vec_spt[i_se_flat],
2403 scat_data_mono[i_ss][i_se].abs_vec_data(
2404 0, 0, joker, joker, joker),
2405 scat_data_mono[i_ss][i_se].za_grid,
2406 scat_data_mono[i_ss][i_se].aa_grid,
2407 scat_data_mono[i_ss][i_se].ptype,
2408 za_sca,
2409 aa_sca,
2410 verbosity);
2411 }
2412 }
2413
2414 i_se_flat++;
2415 }
2416 }
2417}
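
// Editor's note: a minimal, hypothetical sketch (not part of ARTS) of the
// particle-number-density gate used in the loops above: scattering elements
// whose pnd at the current grid point does not exceed PND_LIMIT contribute
// negligibly, so the (costly) coordinate transformation is skipped for them.
static bool demo_pnd_relevant(const Tensor4& pnd_field,
                              const Index& i_se_flat,
                              const Index& scat_p_index,
                              const Index& scat_lat_index,
                              const Index& scat_lon_index) {
  return pnd_field(i_se_flat, scat_p_index, scat_lat_index, scat_lon_index) >
         PND_LIMIT;
}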
2418
2419/* Workspace method: Doxygen documentation will be auto-generated */
2420void pha_mat_sptFromMonoData( // Output:
2421    Tensor5& pha_mat_spt,
2422 // Input:
2423 const ArrayOfArrayOfSingleScatteringData& scat_data_mono,
2424 const Index& doit_za_grid_size,
2425 const Vector& aa_grid,
2426 const Index& za_index, // propagation directions
2427 const Index& aa_index,
2428 const Numeric& rtp_temperature,
2429 const Tensor4& pnd_field,
2430 const Index& scat_p_index,
2431 const Index& scat_lat_index,
2432 const Index& scat_lon_index,
2433 const Verbosity& verbosity) {
2434 Vector za_grid;
2435 nlinspace(za_grid, 0, 180, doit_za_grid_size);
2436
2437 const Index N_se_total = TotalNumberOfElements(scat_data_mono);
2438 if (N_se_total != pnd_field.nbooks()) {
2439 ostringstream os;
2440 os << "Total number of scattering elements in *scat_data_mono* "
2441 << "inconsistent with size of pnd_field.";
2442 throw runtime_error(os.str());
2443 }
2444 // as pha_mat_spt is typically initialized from pnd_field, this theoretically
2445 // checks the same as the runtime_error above. Still, we keep it to be on the
2446  // safe side.
2447 ARTS_ASSERT(pha_mat_spt.nshelves() == N_se_total);
2448
2449 const Index stokes_dim = pha_mat_spt.ncols();
2450 if (stokes_dim > 4 || stokes_dim < 1) {
2451 throw runtime_error(
2452 "The dimension of the stokes vector \n"
2453 "must be 1,2,3 or 4");
2454 }
2455
2456 // Check that we do indeed have scat_data_mono here. Only checking the first
2457 // scat element, assuming the other elements have been processed in the same
2458  // manner. That's safe against having scat_data here if that originated from
2459  // scat_data_raw reading routines (ScatSpecies/Element*Add/Read), but it's not safe
2460 // against data read by ReadXML directly or if scat_data(_raw) has been (partly)
2461 // produced from scat_data_singleTmatrix. That would be too costly here,
2462 // though.
2463 // Also, we can't check here whether data is at the correct frequency since we
2464 // don't know f_grid and f_index here (we could pass it in, though).
2465 if (scat_data_mono[0][0].f_grid.nelem() > 1) {
2466 ostringstream os;
2467 os << "Scattering data seems to be *scat_data* (several freq points),\n"
2468 << "but *scat_data_mono* (1 freq point only) is expected here.";
2469 throw runtime_error(os.str());
2470 }
2471
2472 GridPos T_gp = {0, {0, 1}}, Tred_gp;
2473 Vector itw(2);
2474
2475 // Initialisation
2476 pha_mat_spt = 0.;
2477
2478 Index i_se_flat = 0;
2479 for (Index i_ss = 0; i_ss < scat_data_mono.nelem(); i_ss++) {
2480 for (Index i_se = 0; i_se < scat_data_mono[i_ss].nelem(); i_se++) {
2481 // If the particle number density at a specific point in the
2482 // atmosphere for scattering element i_se is zero, we don't need to
2483 // do the transformation!
2484 if (pnd_field(i_se_flat, scat_p_index, scat_lat_index, scat_lon_index) >
2485 PND_LIMIT) {
2486 // Temporary phase matrix which includes all temperatures.
2487 Index nT = scat_data_mono[i_ss][i_se].pha_mat_data.nvitrines();
2488 Tensor3 pha_mat_spt_tmp(nT, pha_mat_spt.nrows(), pha_mat_spt.ncols());
2489
2490 pha_mat_spt_tmp = 0.;
2491
2492 Index ti = -1;
2493 if (nT == 1) // just 1 T_grid element
2494 {
2495 ti = 0;
2496 } else if (rtp_temperature < 0.) // coding for 'not interpolate, but
2497 // pick one temperature'
2498 {
2499 if (rtp_temperature > -10.) // lowest T-point
2500 {
2501 ti = 0;
2502 } else if (rtp_temperature > -20.) // highest T-point
2503 {
2504 ti = nT - 1;
2505 } else // median T-point
2506 {
2507 ti = nT / 2;
2508 }
2509 } else {
2510 ostringstream os;
2511 os << "In pha_mat_sptFromMonoData.\n"
2512 << "The temperature grid of the scattering data does not\n"
2513 << "cover the atmospheric temperature at cloud location.\n"
2514 << "The data should include the value T = " << rtp_temperature
2515 << " K.";
2516          chk_interpolation_grids(
2517              os.str(), scat_data_mono[i_ss][i_se].T_grid, rtp_temperature);
2518
2519 // Gridpositions:
2520 gridpos(T_gp, scat_data_mono[i_ss][i_se].T_grid, rtp_temperature);
2521 gridpos_copy(Tred_gp, T_gp);
2522 Tred_gp.idx = 0;
2523 // Interpolation weights:
2524 interpweights(itw, Tred_gp);
2525 }
2526
2527 // Do the transformation into the laboratory coordinate system.
2528 for (Index za_inc_idx = 0; za_inc_idx < doit_za_grid_size;
2529 za_inc_idx++) {
2530 for (Index aa_inc_idx = 0; aa_inc_idx < aa_grid.nelem();
2531 aa_inc_idx++) {
2532 if (ti < 0) // Temperature interpolation
2533 {
2534 for (Index t_idx = 0; t_idx < 2; t_idx++) {
2535                pha_matTransform(
2536                    pha_mat_spt_tmp(t_idx, joker, joker),
2537 scat_data_mono[i_ss][i_se].pha_mat_data(
2538 0, t_idx + T_gp.idx, joker, joker, joker, joker, joker),
2539 scat_data_mono[i_ss][i_se].za_grid,
2540 scat_data_mono[i_ss][i_se].aa_grid,
2541 scat_data_mono[i_ss][i_se].ptype,
2542 za_index,
2543 aa_index,
2544 za_inc_idx,
2545 aa_inc_idx,
2546 za_grid,
2547 aa_grid,
2548 verbosity);
2549 }
2550
2551 for (Index i = 0; i < stokes_dim; i++) {
2552 for (Index j = 0; j < stokes_dim; j++) {
2553 pha_mat_spt(i_se_flat, za_inc_idx, aa_inc_idx, i, j) =
2554 interp(itw, pha_mat_spt_tmp(joker, i, j), Tred_gp);
2555 }
2556 }
2557 } else // no temperature interpolation required
2558 {
2559              pha_matTransform(
2560                  pha_mat_spt(i_se_flat, za_inc_idx, aa_inc_idx, joker, joker),
2561 scat_data_mono[i_ss][i_se].pha_mat_data(
2562 0, ti, joker, joker, joker, joker, joker),
2563 scat_data_mono[i_ss][i_se].za_grid,
2564 scat_data_mono[i_ss][i_se].aa_grid,
2565 scat_data_mono[i_ss][i_se].ptype,
2566 za_index,
2567 aa_index,
2568 za_inc_idx,
2569 aa_inc_idx,
2570 za_grid,
2571 aa_grid,
2572 verbosity);
2573 }
2574 }
2575 }
2576 }
2577
2578 i_se_flat++;
2579 }
2580 }
2581}
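
// Editor's note: a minimal, hypothetical sketch (not part of ARTS) of the
// special coding of rtp_temperature used above: a negative value means "do
// not interpolate in temperature but pick one T-point" (values in (-10,0)
// select the lowest point, values in (-20,-10] the highest, anything smaller
// the median). A return value of -1 stands for "interpolate in temperature".
static Index demo_select_T_index(const Numeric& rtp_temperature,
                                 const Index& nT) {
  if (nT == 1) return 0;                      // only one T-point available
  if (rtp_temperature >= 0.) return -1;       // regular case: interpolate
  if (rtp_temperature > -10.) return 0;       // lowest T-point
  if (rtp_temperature > -20.) return nT - 1;  // highest T-point
  return nT / 2;                              // median T-point
}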
2582
2583/* Workspace method: Doxygen documentation will be auto-generated */
2584void pha_mat_sptFromScat_data( // Output:
2585    Tensor5& pha_mat_spt,
2586 // Input:
2587 const ArrayOfArrayOfSingleScatteringData& scat_data,
2588 const Index& scat_data_checked,
2589 const Vector& za_grid,
2590 const Vector& aa_grid,
2591 const Index& za_index, // propagation directions
2592 const Index& aa_index,
2593 const Index& f_index,
2594 const Numeric& rtp_temperature,
2595 const Tensor4& pnd_field,
2596 const Index& scat_p_index,
2597 const Index& scat_lat_index,
2598 const Index& scat_lon_index,
2599 const Verbosity& verbosity) {
2600 if (scat_data_checked != 1)
2601 throw runtime_error(
2602 "The scattering data must be flagged to have "
2603 "passed a consistency check (scat_data_checked=1).");
2604
2605 const Index stokes_dim = pha_mat_spt.ncols();
2606 if (stokes_dim > 4 || stokes_dim < 1) {
2607 throw runtime_error(
2608 "The dimension of the stokes vector \n"
2609 "must be 1,2,3 or 4");
2610 }
2611
2612 // Determine total number of scattering elements
2613 const Index N_se_total = TotalNumberOfElements(scat_data);
2614 if (N_se_total != pnd_field.nbooks()) {
2615 ostringstream os;
2616 os << "Total number of scattering elements in scat_data "
2617 << "inconsistent with size of pnd_field.";
2618 throw runtime_error(os.str());
2619 }
2620 // as pha_mat_spt is typically initialized from pnd_field, this theoretically
2621 // checks the same as the runtime_error above. Still, we keep it to be on the
2622  // safe side.
2623 ARTS_ASSERT(pha_mat_spt.nshelves() == N_se_total);
2624
2625 const Index N_ss = scat_data.nelem();
2626
2627 // Phase matrix in laboratory coordinate system. Dimensions:
2628 // [frequency, za_inc, aa_inc, stokes_dim, stokes_dim]
2629 Tensor5 pha_mat_data_int;
2630
2631 Index this_f_index;
2632
2633 Index i_se_flat = 0;
2634 // Loop over scattering species
2635 for (Index i_ss = 0; i_ss < N_ss; i_ss++) {
2636 const Index N_se = scat_data[i_ss].nelem();
2637
2638 // Loop over the included scattering elements
2639 for (Index i_se = 0; i_se < N_se; i_se++) {
2640 // If the particle number density at a specific point in the
2641 // atmosphere for the i_se scattering element is zero, we don't need
2642      // to do the transformation!
2643 if (abs(pnd_field(
2644 i_se_flat, scat_p_index, scat_lat_index, scat_lon_index)) >
2645 PND_LIMIT) {
2646 // First we have to transform the data from the coordinate system
2647 // used in the database (depending on the kind of ptype) to the
2648 // laboratory coordinate system.
2649
2650 // Resize the variables for the interpolated data (1freq, 1T):
2651 pha_mat_data_int.resize(PHA_MAT_DATA.nshelves(),
2652 PHA_MAT_DATA.nbooks(),
2653 PHA_MAT_DATA.npages(),
2654 PHA_MAT_DATA.nrows(),
2655 PHA_MAT_DATA.ncols());
2656
2657 // Frequency extraction and temperature interpolation
2658
2659 // Gridpositions and interpolation weights;
2660 GridPos t_gp;
2661 Vector itw;
2662 Index this_T_index = -1;
2663 if (PHA_MAT_DATA.nvitrines() == 1) {
2664 this_T_index = 0;
2665 } else if (rtp_temperature < 0.) // coding for 'not interpolate, but
2666 // pick one temperature'
2667 {
2668 if (rtp_temperature > -10.) // lowest T-point
2669 {
2670 this_T_index = 0;
2671 } else if (rtp_temperature > -20.) // highest T-point
2672 {
2673 this_T_index = PHA_MAT_DATA.nvitrines() - 1;
2674 } else // median T-point
2675 {
2676 this_T_index = PHA_MAT_DATA.nvitrines() / 2;
2677 }
2678 } else {
2679 ostringstream os;
2680 os << "In pha_mat_sptFromScat_data.\n"
2681 << "The temperature grid of the scattering data does not\n"
2682 << "cover the atmospheric temperature at cloud location.\n"
2683 << "The data should include the value T = " << rtp_temperature
2684 << " K.";
2685 chk_interpolation_grids(os.str(), T_DATAGRID, rtp_temperature);
2686
2687 gridpos(t_gp, T_DATAGRID, rtp_temperature);
2688
2689 // Interpolation weights:
2690 itw.resize(2);
2691 interpweights(itw, t_gp);
2692 }
2693
2694 if (PHA_MAT_DATA.nlibraries() == 1)
2695 this_f_index = 0;
2696 else
2697 this_f_index = f_index;
2698
2699 if (this_T_index < 0) {
2700 // Interpolation of scattering matrix:
2701 for (Index i_za_sca = 0; i_za_sca < PHA_MAT_DATA.nshelves();
2702 i_za_sca++)
2703 for (Index i_aa_sca = 0; i_aa_sca < PHA_MAT_DATA.nbooks();
2704 i_aa_sca++)
2705 for (Index i_za_inc = 0; i_za_inc < PHA_MAT_DATA.npages();
2706 i_za_inc++)
2707 for (Index i_aa_inc = 0; i_aa_inc < PHA_MAT_DATA.nrows();
2708 i_aa_inc++)
2709 for (Index i = 0; i < PHA_MAT_DATA.ncols(); i++)
2710 pha_mat_data_int(
2711 i_za_sca, i_aa_sca, i_za_inc, i_aa_inc, i) =
2712 interp(itw,
2713 PHA_MAT_DATA(this_f_index,
2714 joker,
2715 i_za_sca,
2716 i_aa_sca,
2717 i_za_inc,
2718 i_aa_inc,
2719 i),
2720 t_gp);
2721 } else {
2722 pha_mat_data_int = PHA_MAT_DATA(
2723 this_f_index, this_T_index, joker, joker, joker, joker, joker);
2724 /*
2725 for (Index i_za_sca = 0;
2726 i_za_sca < PHA_MAT_DATA.nshelves(); i_za_sca++)
2727 for (Index i_aa_sca = 0;
2728 i_aa_sca < PHA_MAT_DATA.nbooks(); i_aa_sca++)
2729 for (Index i_za_inc = 0;
2730 i_za_inc < PHA_MAT_DATA.npages(); i_za_inc++)
2731 for (Index i_aa_inc = 0;
2732 i_aa_inc < PHA_MAT_DATA.nrows(); i_aa_inc++)
2733 for (Index i = 0; i < PHA_MAT_DATA.ncols(); i++)
2734 // Interpolation of phase matrix:
2735 pha_mat_data_int(i_za_sca, i_aa_sca,
2736 i_za_inc, i_aa_inc, i) =
2737 PHA_MAT_DATA(this_f_index, this_T_index,
2738 i_za_sca, i_aa_sca,
2739 */
2740 }
2741
2742 // Do the transformation into the laboratory coordinate system.
2743 for (Index za_inc_idx = 0; za_inc_idx < za_grid.nelem();
2744 za_inc_idx++) {
2745 for (Index aa_inc_idx = 0; aa_inc_idx < aa_grid.nelem();
2746 aa_inc_idx++) {
2747            pha_matTransform(
2748                pha_mat_spt(i_se_flat, za_inc_idx, aa_inc_idx, joker, joker),
2749                pha_mat_data_int,
2750                ZA_DATAGRID,
2751                AA_DATAGRID,
2752                PART_TYPE,
2753 za_index,
2754 aa_index,
2755 za_inc_idx,
2756 aa_inc_idx,
2757 za_grid,
2758 aa_grid,
2759 verbosity);
2760 }
2761 }
2762 }
2763 i_se_flat++;
2764 }
2765 }
2766}
2767
2768/* Workspace method: Doxygen documentation will be auto-generated */
2769void ScatSpeciesMerge( //WS Output:
2770 Tensor4& pnd_field,
2771    ArrayOfArrayOfSingleScatteringData& scat_data,
2772    ArrayOfArrayOfScatteringMetaData& scat_meta,
2773    ArrayOfString& scat_species,
2774 Index& cloudbox_checked,
2775 //WS Input:
2776 const Index& atmosphere_dim,
2777 const Index& cloudbox_on,
2778 const ArrayOfIndex& cloudbox_limits,
2779 const Tensor3& t_field,
2780 const Tensor3& z_field,
2781 const Matrix& z_surface,
2782 const Verbosity& /*verbosity*/) {
2783 // FIXME:
2784 // so far, this works for both scat_data and scat_data_raw. Needs to be
2785 // adapted, though, once we have WSM that can create Z/K/a with different
2786 // f/T dimensions than scat_data_single.f/T_grid.
2787
2788 // cloudbox variable state should be ok before entering here
2789 if (!cloudbox_checked)
2790 throw std::runtime_error(
2791 "You must call *cloudbox_checkedCalc* before this method.");
2792  // However, we modify cloudbox variables, hence force re-checking the new
2793  // variables by resetting cloudbox_checked to 0 (false).
2794 cloudbox_checked = 0;
2795
2796 if (atmosphere_dim != 1)
2797 throw std::runtime_error(
2798        "Merging scattering elements only works with a 1D atmosphere");
2799
2800 // Cloudbox on/off?
2801 if (!cloudbox_on) {
2802 /* Must initialise pnd_field anyway; but empty */
2803 pnd_field.resize(0, 0, 0, 0);
2804 return;
2805 }
2806
2807 // ------- setup new pnd_field and scat_data -------------------
2808 ArrayOfIndex limits(2);
2809 //pressure
2810 limits[0] = cloudbox_limits[0];
2811 limits[1] = cloudbox_limits[1] + 1;
2812
2813 Tensor4 pnd_field_merged(
2814 limits[1] - limits[0], limits[1] - limits[0], 1, 1, 0.);
2815
2816 ArrayOfArrayOfSingleScatteringData scat_data_merged;
2817 scat_data_merged.resize(1);
2818 scat_data_merged[0].resize(pnd_field_merged.nbooks());
2819 ArrayOfArrayOfScatteringMetaData scat_meta_merged;
2820 scat_meta_merged.resize(1);
2821 scat_meta_merged[0].resize(pnd_field_merged.nbooks());
2822 ArrayOfString scat_species_merged;
2823 scat_species_merged.resize(1);
2824 scat_species_merged[0] = "mergedfield-mergedpsd";
2825 for (Index sp = 0; sp < scat_data_merged[0].nelem(); sp++) {
2826 SingleScatteringData& this_part = scat_data_merged[0][sp];
2827 this_part.ptype = scat_data[0][0].ptype;
2828 this_part.description = "Merged scattering elements";
2829 this_part.f_grid = scat_data[0][0].f_grid;
2830 this_part.za_grid = scat_data[0][0].za_grid;
2831 this_part.aa_grid = scat_data[0][0].aa_grid;
2832 this_part.pha_mat_data.resize(scat_data[0][0].pha_mat_data.nlibraries(),
2833 1,
2834 scat_data[0][0].pha_mat_data.nshelves(),
2835 scat_data[0][0].pha_mat_data.nbooks(),
2836 scat_data[0][0].pha_mat_data.npages(),
2837 scat_data[0][0].pha_mat_data.nrows(),
2838 scat_data[0][0].pha_mat_data.ncols());
2839 this_part.ext_mat_data.resize(scat_data[0][0].ext_mat_data.nshelves(),
2840 1,
2841 scat_data[0][0].ext_mat_data.npages(),
2842 scat_data[0][0].ext_mat_data.nrows(),
2843 scat_data[0][0].ext_mat_data.ncols());
2844 this_part.abs_vec_data.resize(scat_data[0][0].abs_vec_data.nshelves(),
2845 1,
2846 scat_data[0][0].abs_vec_data.npages(),
2847 scat_data[0][0].abs_vec_data.nrows(),
2848 scat_data[0][0].abs_vec_data.ncols());
2849 this_part.pha_mat_data = 0.;
2850 this_part.ext_mat_data = 0.;
2851 this_part.abs_vec_data = 0.;
2852 this_part.T_grid.resize(1);
2853 this_part.T_grid[0] = t_field(sp, 0, 0);
2854
2855 ScatteringMetaData& this_meta = scat_meta_merged[0][sp];
2856 ostringstream os;
2857 os << "Merged scattering element of cloudbox-level #" << sp;
2858 this_meta.description = os.str();
2859 this_meta.source = "ARTS internal";
2860 this_meta.refr_index = "Unknown";
2861 this_meta.mass = -1.;
2862 this_meta.diameter_max = -1.;
2863 this_meta.diameter_volume_equ = -1.;
2864 this_meta.diameter_area_equ_aerodynamical = -1.;
2865 }
2866
2867 // Check that all scattering elements have same ptype and data dimensions
2868 SingleScatteringData& first_part = scat_data[0][0];
2869 for (Index i_ss = 0; i_ss < scat_data.nelem(); i_ss++) {
2870 for (Index i_se = 0; i_se < scat_data[i_ss].nelem(); i_se++) {
2871 SingleScatteringData& orig_part = scat_data[i_ss][i_se];
2872
2873 if (orig_part.ptype != first_part.ptype)
2874 throw std::runtime_error(
2875 "All scattering elements must have the same type");
2876
2877 if (orig_part.f_grid.nelem() != first_part.f_grid.nelem())
2878 throw std::runtime_error(
2879 "All scattering elements must have the same f_grid");
2880
2881 if (!is_size(orig_part.pha_mat_data(
2882 joker, 0, joker, joker, joker, joker, joker),
2883 first_part.pha_mat_data.nlibraries(),
2884 first_part.pha_mat_data.nshelves(),
2885 first_part.pha_mat_data.nbooks(),
2886 first_part.pha_mat_data.npages(),
2887 first_part.pha_mat_data.nrows(),
2888 first_part.pha_mat_data.ncols()))
2889 throw std::runtime_error(
2890 "All scattering elements must have the same pha_mat_data size"
2891 " (except for temperature).");
2892
2893 if (!is_size(orig_part.ext_mat_data(joker, 0, joker, joker, joker),
2894 first_part.ext_mat_data.nshelves(),
2895 first_part.ext_mat_data.npages(),
2896 first_part.ext_mat_data.nrows(),
2897 first_part.ext_mat_data.ncols()))
2898 throw std::runtime_error(
2899 "All scattering elements must have the same ext_mat_data size"
2900 " (except for temperature).");
2901
2902 if (!is_size(orig_part.abs_vec_data(joker, 0, joker, joker, joker),
2903 first_part.abs_vec_data.nshelves(),
2904 first_part.abs_vec_data.npages(),
2905 first_part.abs_vec_data.nrows(),
2906 first_part.abs_vec_data.ncols()))
2907 throw std::runtime_error(
2908 "All scattering elements must have the same abs_vec_data size"
2909 " (except for temperature).");
2910 }
2911 }
2912
2913 //----- Start pnd_field_merged and scat_data_array_merged calculations -----
2914
2915 GridPos T_gp;
2916 Vector itw(2);
2917
2918 Index nlevels = pnd_field_merged.nbooks();
2919 // loop over pressure levels in cloudbox
2920 for (Index i_lv = 0; i_lv < nlevels - 1; i_lv++) {
2921 pnd_field_merged(i_lv, i_lv, 0, 0) = 1.;
2922
2923 SingleScatteringData& this_part = scat_data_merged[0][i_lv];
2924 for (Index i_ss = 0; i_ss < scat_data.nelem(); i_ss++) {
2925 for (Index i_se = 0; i_se < scat_data[i_ss].nelem(); i_se++) {
2926 SingleScatteringData& orig_part = scat_data[i_ss][i_se];
2927 const Index pnd_index = FlattenedIndex(scat_data, i_ss, i_se);
2928
2929 // If the particle number density at a specific point in the
2930 // atmosphere for the i_se scattering element is zero, we don't
2931 // need to do the transformation!
2932 if (pnd_field(pnd_index, i_lv, 0, 0) > PND_LIMIT) //TRS
2933 {
2934 Numeric temperature = this_part.T_grid[0];
2935 if (orig_part.T_grid.nelem() > 1) {
2936 ostringstream os;
2937 os << "The temperature grid of the scattering data "
2938 << "does not cover the\n"
2939 << "atmospheric temperature at cloud location. "
2940 << "The data should\n"
2941 << "include the value T = " << temperature << " K.\n"
2942 << "Offending particle is scat_data[" << i_ss << "][" << i_se
2943 << "]:\n"
2944 << "Description: " << orig_part.description << "\n";
2945 chk_interpolation_grids(os.str(), orig_part.T_grid, temperature);
2946
2947 // Gridpositions:
2948 gridpos(T_gp, orig_part.T_grid, temperature);
2949 // Interpolation weights:
2950 interpweights(itw, T_gp);
2951 }
2952
2954
2955 // Loop over frequencies
2956 for (Index i_f = 0; i_f < orig_part.pha_mat_data.nlibraries();
2957 i_f++) {
2958 // Loop over zenith angles
2959 for (Index i_za = 0; i_za < orig_part.ext_mat_data.npages();
2960 i_za++) {
2961 // Loop over azimuth angles
2962 for (Index i_aa = 0; i_aa < orig_part.ext_mat_data.nrows();
2963 i_aa++) {
2964 // Weighted sum of ext_mat_data and abs_vec_data
2965 if (orig_part.T_grid.nelem() == 1) {
2966 Vector v = orig_part.ext_mat_data(i_f, 0, i_za, i_aa, joker);
2967 v *= pnd_field(pnd_index, i_lv, 0, 0);
2968 this_part.ext_mat_data(i_f, 0, i_za, 0, joker) += v;
2969
2970 v = orig_part.abs_vec_data(i_f, 0, i_za, i_aa, joker);
2971 v *= pnd_field(pnd_index, i_lv, 0, 0);
2972 this_part.abs_vec_data(i_f, 0, i_za, i_aa, joker) += v;
2973 } else {
2974 for (Index i = 0; i < orig_part.ext_mat_data.ncols(); i++) {
2975 // Temperature interpolation
2976 this_part.ext_mat_data(i_f, 0, i_za, i_aa, i) +=
2977 pnd_field(pnd_index, i_lv, 0, 0) *
2978 interp(
2979 itw,
2980 orig_part.ext_mat_data(i_f, joker, i_za, i_aa, i),
2981 T_gp);
2982 }
2983 for (Index i = 0; i < orig_part.abs_vec_data.ncols(); i++) {
2984 // Temperature interpolation
2985 this_part.abs_vec_data(i_f, 0, i_za, i_aa, i) +=
2986 pnd_field(pnd_index, i_lv, 0, 0) *
2987 interp(
2988 itw,
2989 orig_part.abs_vec_data(i_f, joker, i_za, i_aa, i),
2990 T_gp);
2991 }
2992 }
2993 }
2994 }
2995
2997
2998 // Loop over outgoing zenith angles
2999 for (Index i_za_out = 0;
3000 i_za_out < orig_part.pha_mat_data.nshelves();
3001 i_za_out++) {
3002 // Loop over outgoing azimuth angles
3003 for (Index i_aa_out = 0;
3004 i_aa_out < orig_part.pha_mat_data.nbooks();
3005 i_aa_out++) {
3006 // Loop over incoming zenith angles
3007 for (Index i_za_inc = 0;
3008 i_za_inc < orig_part.pha_mat_data.npages();
3009 i_za_inc++) {
3010 // Loop over incoming azimuth angles
3011 for (Index i_aa_inc = 0;
3012 i_aa_inc < orig_part.pha_mat_data.nrows();
3013 i_aa_inc++) {
3014 // Weighted sum of pha_mat_data
3015 if (orig_part.T_grid.nelem() == 1) {
3016 Vector v = orig_part.pha_mat_data(i_f,
3017 0,
3018 i_za_out,
3019 i_aa_out,
3020 i_za_inc,
3021 i_aa_inc,
3022 joker);
3023 v *= pnd_field(pnd_index, i_lv, 0, 0);
3024 this_part.pha_mat_data(i_f,
3025 0,
3026 i_za_out,
3027 i_aa_out,
3028 i_za_inc,
3029 i_aa_inc,
3030 joker) = v;
3031 } else {
3032 // Temperature interpolation
3033 for (Index i = 0; i < orig_part.pha_mat_data.ncols();
3034 i++) {
3035 this_part.pha_mat_data(i_f,
3036 0,
3037 i_za_out,
3038 i_aa_out,
3039 i_za_inc,
3040 i_aa_inc,
3041 i) +=
3042 pnd_field(pnd_index, i_lv, 0, 0) *
3043 interp(itw,
3044 orig_part.pha_mat_data(i_f,
3045 joker,
3046 i_za_out,
3047 i_aa_out,
3048 i_za_inc,
3049 i_aa_inc,
3050 i),
3051 T_gp);
3052 }
3053 }
3054 }
3055 }
3056 }
3057 }
3058 }
3059 }
3060 }
3061 }
3062 }
3063
3064 // Set new pnd_field at lowest altitude to 0 if the cloudbox doesn't touch
3065 // the ground.
3066 // The consistency for the original pnd_field has already been ensured by
3067 // cloudbox_checkedCalc
3068 if (z_field(cloudbox_limits[0], 0, 0) > z_surface(0, 0))
3069 pnd_field_merged(0, 0, 0, 0) = 0.;
3070
3071 pnd_field = pnd_field_merged;
3072 scat_data = scat_data_merged;
3073 scat_meta = scat_meta_merged;
3074 scat_species = scat_species_merged;
3075}
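
// Editor's note: a minimal, hypothetical sketch (not part of ARTS) of the
// core operation of ScatSpeciesMerge above: at each cloudbox level the merged
// optical property is the pnd-weighted sum of the per-element properties
// (with the per-element data first interpolated to the level temperature when
// several T-points are available). Shown here for a single scalar property.
static Numeric demo_pnd_weighted_sum(ConstVectorView pnd,         // per element
                                     ConstVectorView property) {  // per element
  Numeric merged = 0.;
  for (Index i = 0; i < pnd.nelem(); i++) merged += pnd[i] * property[i];
  return merged;
}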
3076
3077/* Workspace method: Doxygen documentation will be auto-generated */
3079 //WS Output:
3080 Vector& meta_param,
3081 //WS Input:
3082 const ArrayOfArrayOfScatteringMetaData& scat_meta,
3083 const String& meta_name,
3084 const Index& scat_species_index,
3085 const Verbosity& /*verbosity*/) {
3086 if (scat_species_index < 0) {
3087 ostringstream os;
3088 os << "scat_species_index can't be <0!";
3089 throw runtime_error(os.str());
3090 }
3091
3092 const Index nss = scat_meta.nelem();
3093
3094 // check that scat_meta actually has at least scat_species_index elements
3095 if (!(nss > scat_species_index)) {
3096 ostringstream os;
3097 os << "Can not extract data for scattering species #" << scat_species_index
3098 << "\n"
3099 << "because scat_meta has only " << nss << " elements.";
3100 throw runtime_error(os.str());
3101 }
3102
3103 const Index nse = scat_meta[scat_species_index].nelem();
3104 meta_param.resize(nse);
3105
3106 for (Index i = 0; i < nse; i++) {
3107 if (meta_name == "mass")
3108 meta_param[i] = scat_meta[scat_species_index][i].mass;
3109 else if (meta_name == "diameter_max")
3110 meta_param[i] = scat_meta[scat_species_index][i].diameter_max;
3111 else if (meta_name == "diameter_volume_equ")
3112 meta_param[i] = scat_meta[scat_species_index][i].diameter_volume_equ;
3113 else if (meta_name == "diameter_area_equ_aerodynamical")
3114 meta_param[i] =
3115 scat_meta[scat_species_index][i].diameter_area_equ_aerodynamical;
3116 else {
3117 ostringstream os;
3118      os << "Meta parameter \"" << meta_name << "\" is unknown.";
3119 throw runtime_error(os.str());
3120 }
3121 }
3122}
3123
3124
WORKSPACE METHOD: ScatSpeciesMerge.
void nlinspace(Vector &x, const Numeric start, const Numeric stop, const Index n)
Create a Vector of n equally spaced points between start and stop.
Definition: math_funcs.cc:225
Numeric AngIntegrate_trapezoid(ConstMatrixView Integrand, ConstVectorView za_grid, ConstVectorView aa_grid)
Integrate a matrix of values over the angular grids using the trapezoidal rule.
Definition: math_funcs.cc:290
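nlinspace and AngIntegrate_trapezoid are the usual pair for building equidistant angular grids and integrating a quantity over the full sphere. A short sketch under assumed grid sizes; the function name solid_angle_check and the dummy integrand Z are hypothetical:

// Assumed sketch: integrate a constant integrand of 1 over the full sphere.
// AngIntegrate_trapezoid applies the sin(za) weighting internally, so the
// result should be approximately 4*PI.
Numeric solid_angle_check() {
  Vector za_grid, aa_grid;
  nlinspace(za_grid, 0, 180, 19);   // zenith angles in degrees
  nlinspace(aa_grid, 0, 360, 37);   // azimuth angles in degrees

  Matrix Z(za_grid.nelem(), aa_grid.nelem(), 1.0);  // dummy integrand
  return AngIntegrate_trapezoid(Z, za_grid, aa_grid);
}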
INDEX Index
The type to use for all integer numbers and indices.
Definition: matpack.h:39
NUMERIC Numeric
The type to use for all floating point numbers.
Definition: matpack.h:33
const Joker joker
Declarations having to do with the four output streams.
#define CREATE_OUT1
Definition: messages.h:205
#define CREATE_OUT2
Definition: messages.h:206
#define CREATE_OUT0
Definition: messages.h:204
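These macros instantiate the per-level output streams from a Verbosity object named verbosity in the local scope (the Verbosity argument every workspace method receives); an assumed usage sketch:

CREATE_OUT2;  // defines a local stream `out2` from `verbosity`
out2 << "  Interpolating scattering data in frequency.\n";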
constexpr Numeric l(const Index p0, const Index n, const Numeric x, const SortedVectorType &xi, const Index j, const std::pair< Numeric, Numeric > cycle={ -180, 180}) noexcept
void reinterp(VectorView out, const ConstVectorView &iy, const Grid< Vector, 1 > &iw, const Array< Lagrange > &dim0)
Array< Lagrange > LagrangeVector(const ConstVectorView &xs, const ConstVectorView &xi, const Index polyorder, const Numeric extrapol, const bool do_derivs, const GridType type, const std::pair< Numeric, Numeric > cycle)
constexpr bool isnan(double d) noexcept
Definition: nonstd.h:39
void abs_vecTransform(StokesVector &abs_vec_lab, ConstTensor3View abs_vec_data, ConstVectorView za_datagrid, ConstVectorView aa_datagrid, const PType &ptype, const Numeric &za_sca, const Numeric &aa_sca, const Verbosity &verbosity)
Transformation of absorption vector.
void ext_matTransform(PropagationMatrix &ext_mat_lab, ConstTensor3View ext_mat_data, ConstVectorView za_datagrid, ConstVectorView aa_datagrid, const PType &ptype, const Numeric &za_sca, const Numeric &aa_sca, const Verbosity &verbosity)
Transformation of extinction matrix.
void pha_matTransform(MatrixView pha_mat_lab, ConstTensor5View pha_mat_data, ConstVectorView za_datagrid, ConstVectorView aa_datagrid, const PType &ptype, const Index &za_sca_idx, const Index &aa_sca_idx, const Index &za_inc_idx, const Index &aa_inc_idx, ConstVectorView za_grid, ConstVectorView aa_grid, const Verbosity &verbosity)
Transformation of phase matrix.
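The three *Transform functions map single scattering data from the particle (data) frame to the laboratory frame for one scattering direction. A hedged call sketch, reusing the shorthand macros defined at the top of this file; the flat element index i_se_flat, the angles za_sca/aa_sca, and the leading (0, 0, ...) slicing of the frequency and temperature dimensions are assumptions for illustration only:

// Assumed sketch: lab-frame extinction matrix and absorption vector of one
// scattering element for the direction (za_sca, aa_sca) in degrees.
ext_matTransform(ext_mat_spt[i_se_flat],
                 EXT_MAT_DATA(0, 0, joker, joker, joker),
                 ZA_DATAGRID, AA_DATAGRID, PART_TYPE,
                 za_sca, aa_sca, verbosity);

abs_vecTransform(abs_vec_spt[i_se_flat],
                 ABS_VEC_DATA(0, 0, joker, joker, joker),
                 ZA_DATAGRID, AA_DATAGRID, PART_TYPE,
                 za_sca, aa_sca, verbosity);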
Scattering database structure and functions.
@ PTYPE_AZIMUTH_RND
Definition: optproperties.h:54
@ PTYPE_TOTAL_RND
Definition: optproperties.h:55
#define v
Contains sorting routines.
Structure to store a grid position.
Definition: interpolation.h:73
Index idx
Definition: interpolation.h:74
A Lagrange interpolation computer.
Numeric diameter_area_equ_aerodynamical
Numeric diameter_volume_equ
This file contains basic functions to handle XML data files.