grid_xspec.cpp
/* This file is part of Cloudy and is copyright (C)1978-2013 by Gary J. Ferland and
 * others. For conditions of distribution and use see copyright notice in license.txt */
/*gridXspec handles all grid calculations, called by grid_do */
/*gridFunc */
/*GridGatherInCloudy - gathers spectra for each grid calculation to save for end */
#include "cddefines.h"
#include "save.h"
#include "warnings.h"
#include "optimize.h"
#include "cddrive.h"
#include "continuum.h"
#include "rfield.h"
#include "grid.h"
#include "ipoint.h"
#include "called.h"
#include "physconst.h"
#include "prt.h"
#include "mpi_utilities.h"

/*gridXspec handles all grid calculations, called by grid_do */
void gridXspec(realnum xc[], long int nInterpVars)
{
	long int i;

	DEBUG_ENTRY( "gridXspec()" );

	if( nInterpVars > LIMPAR )
	{
		fprintf( ioQQQ, "grid_do: too many parameters are varied, increase LIMPAR\n" );
		cdEXIT(EXIT_FAILURE);
	}

	optimize.nOptimiz = 0;
	grid.nintparm = nInterpVars;

	/* if this is changed there must be some change made to actually
	 * stuff the additional parameter information. */
	grid.naddparm = 0;

	ASSERT( grid.nintparm + grid.naddparm <= LIMPAR );

	grid.totNumModels = 1;
	/* >>chng 06 aug 21, allow the number of parameter values to be different for different parameters. */
	for( i=0; i<nInterpVars; i++ )
	{
		/* >>chng 06 sep 4, use grid variable instead of passing to routine. */
		grid.totNumModels *= grid.numParamValues[i];
	}
	// option to cycle through grid multiple times, default is 1
	grid.totNumModels *= grid.nCycle;
	ASSERT( grid.totNumModels > 1 );

	grid.paramNames = (char**)MALLOC(sizeof(char*)*(unsigned)(nInterpVars+grid.naddparm) );
	grid.paramMethods = (long*)MALLOC(sizeof(long)*(unsigned)(nInterpVars+grid.naddparm) );
	grid.paramRange = (realnum**)MALLOC(sizeof(realnum*)*(unsigned)(nInterpVars+grid.naddparm) );
	grid.paramData = (realnum**)MALLOC(sizeof(realnum*)*(unsigned)(nInterpVars+grid.naddparm) );
	grid.interpParameters = (realnum**)MALLOC(sizeof(realnum*)*(unsigned)(grid.totNumModels) );

	for( i=0; i<nInterpVars+grid.naddparm; i++ )
	{
		grid.paramNames[i] = (char*)MALLOC(sizeof(char)*(unsigned)(12) );
		grid.paramRange[i] = (realnum*)MALLOC(sizeof(realnum)*(unsigned)(6) );
		grid.paramData[i] = (realnum*)MALLOC(sizeof(realnum)*(unsigned)(grid.numParamValues[i]) );

		sprintf( grid.paramNames[i], "%s%ld", "PARAM", i+1 );
		/* Method is 0 for linear, 1 for logarithmic */
		grid.paramMethods[i] = 0;
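		/* Editorial note, not part of the original source: the six paramRange entries
		 * below follow the layout of the OGIP/XSPEC table-model parameter record
		 * (INITIAL, DELTA, MINIMUM, BOTTOM, TOP, MAXIMUM), matching the METHOD flag set above. */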
		/* Initial */
		grid.paramRange[i][0] = xc[i]+grid.paramIncrements[i]*(grid.numParamValues[i]-1.f)/2.f;
		/* Delta */
		grid.paramRange[i][1] = grid.paramIncrements[i]/10.f;
		/* Minimum */
		grid.paramRange[i][2] = xc[i];
		/* Bottom */
		grid.paramRange[i][3] = xc[i]+grid.paramIncrements[i]/10.f;
		/* Top */
		grid.paramRange[i][4] = xc[i]+grid.paramIncrements[i]*(grid.numParamValues[i]-1.f)-grid.paramIncrements[i]/10.f;
		/* Maximum */
		grid.paramRange[i][5] = xc[i]+grid.paramIncrements[i]*(grid.numParamValues[i]-1.f);

		for( long j=0; j<grid.numParamValues[i]; j++ )
		{
			grid.paramData[i][j] = xc[i]+grid.paramIncrements[i]*j;
		}
	}

	for( i=0; i<grid.totNumModels; i++ )
	{
		grid.interpParameters[i] = (realnum*)MALLOC(sizeof(realnum)*(unsigned)(nInterpVars) );
	}

	for( i=0; i< grid.totNumModels; i++ )
	{
		long j;
		realnum variableVector[LIMPAR];

		for( j=0; j<nInterpVars; j++ )
		{
			int index;
			long volumeOtherDimensions = 1;

			/* by "volume", we simply mean the product of the parameter dimensions
			 * AFTER the present index, i.e.:
			 * first "volume" is product of grid.numParamValues[1]*grid.numParamValues[2]*....grid.numParamValues[n]
			 * second "volume" is product of grid.numParamValues[2]*grid.numParamValues[3]*....grid.numParamValues[n]
			 * last "volume" is unity. */
			for( long k=j+1; k<nInterpVars; k++ )
			{
				volumeOtherDimensions *= grid.numParamValues[k];
			}

			/* For each successive parameter, the "volume" is less than the previous one.
			 * So the left-hand side of this modulus operation increases for each parameter,
			 * which causes the index of each parameter to be incremented more often than the
			 * index of the previous parameter. Thus, the last dimension is incremented
			 * every time, then the second to last dimension is incremented with each repeat
			 * of the last dimension. This repeats until, finally, the first dimension is
			 * incremented. For example, the indices of the parameter vectors for a 2x2x3
			 * box would be ordered as such:
			 * [0][0][0]
			 * [0][0][1]
			 * [0][0][2]
			 * [0][1][0]
			 * [0][1][1]
			 * [0][1][2]
			 * [1][0][0]
			 * [1][0][1]
			 * [1][0][2]
			 * [1][1][0]
			 * [1][1][1]
			 * [1][1][2]
			 */
			index = (int)( (i/volumeOtherDimensions)%(grid.numParamValues[j]) );

			/* this prevents parameter incrementation for debugging purposes. */
			if( grid.lgStrictRepeat )
				variableVector[j] = xc[j];
			else
				variableVector[j] = xc[j] + grid.paramIncrements[j]*index;

			grid.interpParameters[i][j] = variableVector[j];

			if( grid.lgLinearIncrements[j] && !optimize.lgOptimizeAsLinear[j] )
				variableVector[j] = log10(variableVector[j]);
		}

		for( j=nInterpVars; j<LIMPAR; j++ )
		{
			variableVector[j] = xc[j];
		}

		if( i == grid.totNumModels - 1 )
		{
			fixit(); // is this needed ??
			called.lgTalk = cpu.i().lgMPI_talk();
			called.lgTalkIsOK = cpu.i().lgMPI_talk();
			prt.lgFaintOn = true;
			grid.lgGridDone = true;
		}

		(void)optimize_func(variableVector,i);
	}
	return;
}
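
/* Editorial sketch, not part of the original source: a standalone illustration of the
 * mixed-radix ("volume") index decomposition used in the model loop above. The function
 * name decomposeGridIndex and its arguments are hypothetical; it maps a flat model index
 * to one index per grid parameter, with the last parameter varying fastest. */
void decomposeGridIndex( long iModel, long nPar, const long numParamValues[], long index[] )
{
	long volume = 1;
	/* walk the dimensions from last to first; "volume" is the product of all
	 * dimensions after the current one, exactly as in gridXspec() */
	for( long j = nPar-1; j >= 0; --j )
	{
		index[j] = (iModel/volume)%numParamValues[j];
		volume *= numParamValues[j];
	}
	/* e.g. for a 2x2x3 grid, iModel = 7 decomposes to index = {1, 0, 1} */
}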

/*GridGatherInCloudy - gathers spectra for each grid calculation to save for end */
void GridGatherInCloudy()
{
	long i;

	DEBUG_ENTRY( "GridGatherInCloudy()" );

	ASSERT( grid.lgGrid );

	/* malloc some arrays if first call and save continuum energies. */
	if( grid.Energies.empty() )
	{
		long i1, i2;

		// this will happen if we have more MPI ranks than grid points
		// the highest ranks will not have executed any model
		if( rfield.nupper <= 0 )
			ContCreateMesh();

		if( grid.LoEnergy_keV == 0. )
			grid.ipLoEnergy = 0;
		else
			grid.ipLoEnergy = ipoint( grid.LoEnergy_keV * 1000. / EVRYD );

		if( grid.HiEnergy_keV == 0. || grid.HiEnergy_keV >= continuum.filbnd[continuum.nrange] )
			grid.ipHiEnergy = rfield.nupper - 1;
		else
			grid.ipHiEnergy = ipoint( grid.HiEnergy_keV * 1000. / EVRYD );

		grid.numEnergies = grid.ipHiEnergy - grid.ipLoEnergy + 1;
		ASSERT( grid.numEnergies > 0 );
		grid.Energies.resize( grid.numEnergies );
		grid.Spectra.reserve(NUM_OUTPUT_TYPES);
		for( i1=0; i1 < NUM_OUTPUT_TYPES; i1++ )
		{
			if( grid.lgOutputTypeOn[i1] )
			{
				grid.Spectra.reserve(i1,grid.totNumModels);
				for( i2=0; i2 < grid.totNumModels; i2++ )
				{
					grid.Spectra.reserve(i1,i2,grid.numEnergies);
				}
			}
		}
		grid.Spectra.alloc();
		// this needs to be zeroed out for MPI runs
		grid.Spectra.zero();

		for( i1=0; i1<grid.numEnergies; i1++ )
		{
			long j = grid.ipLoEnergy + i1;
			grid.Energies[i1] = rfield.AnuOrg[j];
		}
	}

	if( optimize.nOptimiz < grid.totNumModels )
	{
		ASSERT( optimize.nOptimiz >= 0 );

		for( i=0; i < NUM_OUTPUT_TYPES; i++ )
		{
			/* Grab spectrum for xspec printout
			 * at this point nOptimiz has already been incremented for first model */
			if( grid.lgOutputTypeOn[i] )
				cdSPEC2( i, grid.numEnergies, grid.ipLoEnergy, grid.ipHiEnergy,
					 &grid.Spectra[i][optimize.nOptimiz][0]);
		}
	}
	else if( optimize.nOptimiz == grid.totNumModels )
	{
		if( cpu.i().lgMPI() )
		{
			multi_arr<realnum,3> Spectra_Copy = grid.Spectra;

			// combine the grid.Spectra data from all ranks. This is done by adding up
			// the results from all ranks. All but one should be zero. This is needed
			// because we do not know which rank calculated which grid point...
			for( int i=0; i < NUM_OUTPUT_TYPES; ++i )
			{
				if( grid.lgOutputTypeOn[i] )
				{
					for( int j=0; j < grid.totNumModels; ++j )
					{
						MPI::COMM_WORLD.Reduce( &Spectra_Copy[i][j][0],
									&grid.Spectra[i][j][0],
									grid.numEnergies,
									MPI::type(grid.Spectra[i][j][0]),
									MPI::SUM,
									0 );
					}
				}
			}
			MPI::COMM_WORLD.Barrier();
		}
	}
	else
	{
		TotalInsanity();
	}
	return;
}
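
/* Editorial sketch, not part of the original source: the reduction in GridGatherInCloudy()
 * works because each grid point is computed by exactly one rank while every other rank holds
 * zeros for it, so an element-wise sum over ranks reassembles the full set of spectra on
 * rank 0. The stand-alone example below shows the same pattern with the plain C MPI API
 * instead of Cloudy's wrappers; the names nEnergies, partialSpectrum and fullSpectrum are
 * hypothetical. It is kept inside #if 0 so this listing still compiles without MPI. */
#if 0
#include <mpi.h>
#include <vector>

void reduce_partial_spectra( long nEnergies )
{
	// each rank fills only the energies of the models it actually computed; the rest stay zero
	std::vector<float> partialSpectrum( nEnergies, 0.f );
	std::vector<float> fullSpectrum( nEnergies, 0.f );
	// sum the per-rank partial spectra element by element; the result is valid on rank 0 only
	MPI_Reduce( &partialSpectrum[0], &fullSpectrum[0], int(nEnergies),
		    MPI_FLOAT, MPI_SUM, 0, MPI_COMM_WORLD );
}
#endif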