-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathgraingrowth.cpp
More file actions
272 lines (245 loc) · 8.96 KB
/
graingrowth.cpp
File metadata and controls
272 lines (245 loc) · 8.96 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
// graingrowth.cpp
// Anisotropic coarsening algorithms for 2D and 3D sparse phase field (sparsePF) methods
// Questions/comments to gruberja@gmail.com (Jason Gruber)
#ifndef GRAINGROWTH_UPDATE
#define GRAINGROWTH_UPDATE
#include<cassert>
#include<cmath>
#include<cstdio>
#include<cstdlib>
#include<cstring>
#include<ctime>
#include<iomanip>
#include<vector>
#ifdef BGQ
#include<mpi.h>
#endif
#include"MMSP.hpp"
#include"anisotropy.hpp"
#include"graingrowth.hpp"
#include"tessellate.hpp"
typedef float phi_type;
void print_progress(const int step, const int steps, const int iterations);
// Report whether this machine stores multi-byte integers least-significant
// byte first (little-endian). Used only for a diagnostic message.
bool isLittleEndian() {
	const short int probe = 0x1;
	const unsigned char* first_byte = reinterpret_cast<const unsigned char*>(&probe);
	return (*first_byte == 0x1);
}
namespace MMSP {
// Create the initial microstructure for a 2D (4096 x 4096) or 3D (512^3)
// simulation: a Voronoi tessellation of randomly seeded grains, written to
// 'filename' via MMSP::output().
// dim      : spatial dimension; only 2 and 3 are handled (any other value
//            falls through and writes nothing)
// filename : output path for the tessellated grid
void generate(int dim, char* filename) {
#if (!defined MPI_VERSION || MPI_VERSION<2) && ((defined CCNI) || (defined BGQ))
	std::cerr<<"Error: MPI-2 is required for CCNI."<<std::endl;
	exit(1);
#endif
	static unsigned long tstart; // wall-clock marker used to time tessellation and output
	int id=0;
#ifdef MPI_VERSION
	id = MPI::COMM_WORLD.Get_rank();
	int np = MPI::COMM_WORLD.Get_size();
#endif
#ifdef DEBUG
	if (id==0) {
		if (!isLittleEndian()) std::cout<<"Byte order is big-endian."<<std::endl;
	}
#endif
	if (dim == 2) {
		const int edge = 4096;
		//int number_of_fields = edge*10000/2048; //static_cast<int>(pow(edge/10.0,2.0)/4); // edge*10000/2048; // average grain is a disk of radius 10
		int number_of_fields = 65536; // assume 4096 ranks
		MMSP::grid<2,MMSP::sparse<phi_type> > grid(0, 0, edge, 0, edge);
		// Rank 0 reports the global grid geometry and seed count.
		if (id==0) std::cout<<"Grid origin: ("<<g0(grid,0)<<','<<g0(grid,1)<<"),"
			<<" dimensions: "<<g1(grid,0)-g0(grid,0)<<" × "<<g1(grid,1)-g0(grid,1)
			<<" with "<<number_of_fields<<" seeds"<<std::flush;
#ifdef MPI_VERSION
		// tessellate() seeds per-rank, so divide the global seed budget by np.
		number_of_fields /= np;
		if (id==0) std::cout<<", "<<number_of_fields<<" per rank"<<std::flush;
#endif
		if (id==0) std::cout<<"."<<std::endl;
#if (!defined MPI_VERSION) && ((defined CCNI) || (defined BGQ))
		std::cerr<<"Error: CCNI requires MPI."<<std::endl;
		std::exit(1);
#endif
		tstart = time(NULL);
		tessellate<2,phi_type>(grid, number_of_fields);
		if (id==0) std::cout<<"Tessellation complete ("<<time(NULL)-tstart<<" sec)."<<std::endl;
#ifdef MPI_VERSION
		MPI::COMM_WORLD.Barrier();
#endif
		tstart=time(NULL);
		output(grid, filename);
		if (id==0) std::cout<<"Voronoi tessellation written to "<<filename<<" ("<<time(NULL)-tstart<<" sec)."<<std::endl;
	}
	if (dim == 3) {
		const int edge = 512;
		/*
		//int number_of_fields = static_cast<int>(pow(edge/8.0,3.0)/8); // Average grain is a sphere of radius 10 voxels
		int number_of_fields = static_cast<int>(pow(edge,3.0)/(64*pow(8,3.0)/sqrt(27))); // BCC packing with R=8 vox, Vc=(4R/sqrt(3))^3.
#ifdef MPI_VERSION
		while (number_of_fields % np) --number_of_fields;
#endif
		*/
		MMSP::grid<3,MMSP::sparse<phi_type> > grid(0,0,edge,0,edge,0,edge);
		int number_of_fields = 8192;
		// Rank 0 reports the global grid geometry and seed count.
		if (id==0) std::cout<<"Grid origin: ("<<g0(grid,0)<<','<<g0(grid,1)<<','<<g0(grid,2)<<"),"
			<<" dimensions: "<<g1(grid,0)-g0(grid,0)<<" × "<<g1(grid,1)-g0(grid,1)<<" × "<<g1(grid,2)-g0(grid,2)
			<<" with "<<number_of_fields<<" seeds"<<std::flush;
#ifdef MPI_VERSION
		// tessellate() seeds per-rank, so divide the global seed budget by np.
		number_of_fields /= np;
		if (id==0) std::cout<<", "<<number_of_fields<<" per rank"<<std::flush;
#endif
		if (id==0) std::cout<<"."<<std::endl;
#if (!defined MPI_VERSION) && ((defined CCNI) || (defined BGQ))
		std::cerr<<"Error: CCNI requires MPI."<<std::endl;
		std::exit(1);
#endif
		tstart = time(NULL);
		tessellate<3,phi_type>(grid, number_of_fields);
		//const std::string seedfile("points2.txt");
		//tessellate<3,phi_type>(grid, seedfile);
		if (id==0) std::cout<<"Tessellation complete ("<<time(NULL)-tstart<<" sec)."<<std::endl;
#ifdef MPI_VERSION
		MPI::COMM_WORLD.Barrier();
#endif
		tstart=time(NULL);
		output(grid, filename);
		if (id==0) std::cout<<"Voronoi tessellation written to "<<filename<<" ("<<time(NULL)-tstart<<" sec)."<<std::endl;
	}
}
// Evolve the sparse multi-phase-field grid by 'steps' explicit Euler timesteps
// of anisotropic grain growth. Grain-boundary mobility varies with a sawtooth
// temperature profile along the x-axis; the observed mobility range is
// reported (globally reduced under MPI) when the pass completes.
// grid  : sparsePF grid, evolved in place (ghost cells synchronized on exit)
// steps : number of timesteps to take in this pass
template <int dim> void update(MMSP::grid<dim, sparse<phi_type> >& grid, int steps) {
#if (!defined MPI_VERSION) && ((defined CCNI) || (defined BGQ))
	std::cerr<<"Error: MPI is required for CCNI."<<std::endl;
	exit(1);
#endif
	int id=0;
#ifdef MPI_VERSION
	id=MPI::COMM_WORLD.Get_rank();
#endif
	const phi_type dt = 0.005;       // explicit Euler timestep
	const phi_type width = 10.0;     // diffuse interface width (voxels)
	const phi_type epsilon = 1.0e-8; // fields below this are dropped from the sparse node
	const double T_hi = 773.0; // K
	const double T_lo = 673.0; // K
	double mu_lo = 1000.0; // running minimum observed mobility (for reporting)
	double mu_hi = 0.0;    // running maximum observed mobility (for reporting)
	static int iterations = 1; // pass counter, persists across calls
	for (int step = 0; step < steps; step++) {
		if (id==0) print_progress(step, steps, iterations);
		// update grid must be overwritten each time
		ghostswap(grid);
		MMSP::grid<dim, sparse<phi_type> > update(grid);
		for (int i = 0; i < nodes(grid); i++) {
			vector<int> x = position(grid, i);
			// determine nonzero fields within
			// the neighborhood of this node
			// (2 adjacent voxels along each cardinal direction)
			sparse<int> s;
			for (int j = 0; j < dim; j++)
				for (int k = -1; k <= 1; k++) {
					x[j] += k;
					for (int h = 0; h < length(grid(x)); h++) {
						int index = MMSP::index(grid(x), h);
						set(s, index) = 1;
					}
					x[j] -= k;
				}
			phi_type S = phi_type(length(s));
			// if only one field is nonzero,
			// then copy this node to update
			if (S < 2.0) update(i) = grid(i);
			else {
				// compute laplacian of each field
				sparse<phi_type> lap = laplacian(grid, i);
				// compute variational derivatives
				sparse<phi_type> dFdp;
				for (int h = 0; h < length(s); h++) {
					int hindex = MMSP::index(s, h);
					for (int j = h + 1; j < length(s); j++) {
						int jindex = MMSP::index(s, j);
						phi_type gamma = energy(hindex, jindex);
						phi_type eps = 4.0 * sqrt(0.5 * gamma * width) / M_PI;
						phi_type w = 4.0 * gamma / width;
						// Update dFdp_h
						set(dFdp, hindex) += 0.5 * eps * eps * lap[jindex] + w * grid(i)[jindex];
						// Update dFdp_j, so the inner loop can be over j>h instead of j≠h
						set(dFdp, jindex) += 0.5 * eps * eps * lap[hindex] + w * grid(i)[hindex];
					}
				}
				// compute time derivatives
				sparse<phi_type> dpdt;
				//const phi_type temp = T_lo + (T_hi - T_lo)/(g1(grid,0)-g0(grid,0))*(x[0]-g0(grid,0)); // linterp temperature on [673, 773] along x-axis
				// sawtooth linterp temperature on [673, 773] along x-axis for periodic BC
				// BUGFIX: use std::fabs, not unqualified abs — plain abs may bind to the
				// C library's int overload and truncate the argument, flattening the profile.
				const double temp = T_hi - 2.0*(T_hi - T_lo)/(g1(grid,0)-g0(grid,0))*std::fabs(1.0*x[0]-(g1(grid,0)-g0(grid,0))/2.0);
				const double mu = mobility(temp);
				// BUGFIX: track min and max independently; the original 'else if' skipped
				// the minimum update on any iteration that raised the maximum.
				if (mu>mu_hi) mu_hi=mu;
				if (mu<mu_lo) mu_lo=mu;
				for (int h = 0; h < length(s); h++) {
					int hindex = MMSP::index(s, h);
					for (int j = h + 1; j < length(s); j++) {
						int jindex = MMSP::index(s, j);
						// Antisymmetric pairwise exchange keeps the sum of dpdt at zero.
						set(dpdt, hindex) -= mu * (dFdp[hindex] - dFdp[jindex]);
						set(dpdt, jindex) -= mu * (dFdp[jindex] - dFdp[hindex]);
					}
				}
				// compute update values
				phi_type sum = 0.0;
				for (int h = 0; h < length(s); h++) {
					int index = MMSP::index(s, h);
					phi_type value = grid(i)[index] + dt * (2.0 / S) * dpdt[index]; // Extraneous factor of 2?
					if (value > 1.0) value = 1.0;
					if (value < 0.0) value = 0.0;
					if (value > epsilon) set(update(i), index) = value;
					sum += update(i)[index];
				}
				// project onto Gibbs simplex (enforce Σφ=1)
				phi_type rsum = 0.0;
				if (fabs(sum) > 0.0) rsum = 1.0 / sum;
				for (int h = 0; h < length(update(i)); h++) {
					int index = MMSP::index(update(i), h);
					set(update(i), index) *= rsum;
				}
			}
		} // Loop over nodes(grid)
		swap(grid, update);
	} // Loop over steps
	ghostswap(grid);
	++iterations;
#ifdef MPI_VERSION
	// Reduce per-rank mobility extrema to the global extrema before reporting.
	double global_hi, global_lo;
	MPI::COMM_WORLD.Allreduce(&mu_lo, &global_lo, 1, MPI_DOUBLE, MPI_MIN);
	MPI::COMM_WORLD.Allreduce(&mu_hi, &global_hi, 1, MPI_DOUBLE, MPI_MAX);
	mu_hi=global_hi;
	mu_lo=global_lo;
#endif
	if (id==0)
		std::cout<<"mu=["<<mu_lo<<","<<mu_hi<<"]"<<std::endl;
}
// Stream a sparse field as a small table: a header row followed by one
// "index value" row per nonzero entry. Returns the stream for chaining.
template <class T> std::ostream& operator<<(std::ostream& o, sparse<T>& s) {
	o<<" Index Value\n";
	const int entries = length(s);
	for (int entry = 0; entry != entries; ++entry) {
		const int idx = MMSP::index(s, entry);
		o<<" ";
		o<<std::setw(5)<<std::right<<idx;
		o<<" "<<s[idx]<<'\n';
	}
	return o;
}
} // namespace MMSP
// Print a single-line progress bar to stdout for one pass of the solver.
// At step 0 it prints "Pass NNN: <timestamp> [", every 5% it prints a bullet,
// and at the final step it closes the bar with the elapsed wall time and the
// running output-file index.
// step       : current timestep in [0, steps)
// steps      : total timesteps in this pass
// iterations : pass number (labels the line and computes the file index)
void print_progress(const int step, const int steps, const int iterations) {
	static unsigned long tstart; // pass start time; set at step 0, read at the last step
	if (step==0) {
		tstart = time(NULL);
		// Declarations moved into this branch: they were previously function-scope
		// and sat uninitialized on every other call.
		std::time_t rawtime;
		std::time( &rawtime );
		struct tm* timeinfo = std::localtime( &rawtime );
		char* timestring = std::asctime(timeinfo);
		timestring[std::strlen(timestring)-1] = '\0'; // strip asctime's trailing newline
		std::cout<<"Pass "<<std::setw(3)<<std::right<<iterations<<": "<<timestring<<" ["<<std::flush;
	} else if (step==steps-1) {
		unsigned long deltat = time(NULL)-tstart;
		std::cout<<"•] "<<std::setw(2)<<std::right<<deltat/3600<<"h:"
			<<std::setw(2)<<std::right<<(deltat%3600)/60<<"m:"
			<<std::setw(2)<<std::right<<deltat%60<<"s"
			<<" (File "<<std::setw(5)<<std::right<<iterations*steps<<")."<<std::endl;
	} else if ((20 * step) % steps == 0) std::cout<<"• "<<std::flush;
}
#endif
#include"MMSP.main.hpp"