Actual source code: ex9.c
static char help[] = "The solution of 2 different linear systems with different linear solvers.\n\
Also, this example illustrates the repeated\n\
solution of linear systems, while reusing matrix, vector, and solver data\n\
structures throughout the process.  Note the various stages of event logging.\n\n";
/*T
   Concepts: KSP^repeatedly solving linear systems;
   Concepts: PetscLog^profiling multiple stages of code;
   Concepts: PetscLog^user-defined event profiling;
   Processors: n
T*/
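
/*
   Example usage (one possible invocation; assumes the executable is built
   as ex9):
       mpiexec -n 2 ./ex9 -t 2 -my_ksp_monitor -s2_pc_type jacobi
   The options -t and -my_ksp_monitor are parsed by this program below;
   -s2_pc_type reaches the second solver through its "s2_" options prefix.
*/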
/*
  Include "petscksp.h" so that we can use KSP solvers.  Note that this file
  automatically includes:
     petscsys.h    - base PETSc routines
     petscvec.h    - vectors
     petscmat.h    - matrices
     petscis.h     - index sets
     petscksp.h    - Krylov subspace methods
     petscviewer.h - viewers
     petscpc.h     - preconditioners
*/
#include "petscksp.h"
/*
   Declare user-defined routines (defined at the end of this file)
*/
extern PetscErrorCode CheckError(Vec,Vec,Vec,PetscInt,PetscLogEvent);
extern PetscErrorCode MyKSPMonitor(KSP,PetscInt,PetscReal,void*);
int main(int argc,char **args)
{
  Vec           x1,b1,x2,b2; /* solution and RHS vectors for systems #1 and #2 */
  Vec           u;           /* exact solution vector */
  Mat           C1,C2;       /* matrices for systems #1 and #2 */
  KSP           ksp1,ksp2;   /* KSP contexts for systems #1 and #2 */
  PetscInt      ntimes = 3;  /* number of times to solve the linear systems */
  PetscLogEvent CHECK_ERROR; /* event number for error checking */
  PetscInt      ldim,low,high,iglobal,Istart,Iend,Istart2,Iend2;
  PetscInt      Ii,J,i,j,m = 3,n = 2,its,t;
  PetscTruth    flg = PETSC_FALSE;
  PetscScalar   v;
  PetscMPIInt   rank,size;
#if defined (PETSC_USE_LOG)
  PetscLogStage stages[3];
#endif
  PetscInitialize(&argc,&args,(char *)0,help);
  PetscOptionsGetInt(PETSC_NULL,"-m",&m,PETSC_NULL);
  PetscOptionsGetInt(PETSC_NULL,"-t",&ntimes,PETSC_NULL);
  MPI_Comm_rank(PETSC_COMM_WORLD,&rank);
  MPI_Comm_size(PETSC_COMM_WORLD,&size);
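  /*
     The grid is m x n with n tied to the number of processes, so in the
     second assembly loop below each process sets exactly the two grid
     columns 2*rank and 2*rank+1.
  */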
  n = 2*size;
  /*
     Register various stages for profiling
  */
  PetscLogStageRegister("Prelim setup",&stages[0]);
  PetscLogStageRegister("Linear System 1",&stages[1]);
  PetscLogStageRegister("Linear System 2",&stages[2]);
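  /*
     Each stage is activated below by bracketing a section of code with
     PetscLogStagePush(stages[i]) ... PetscLogStagePop(); running with
     -log_summary then reports times, flops, and message traffic broken
     down per stage.
  */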
  /*
     Register a user-defined event for profiling (error checking).
  */
  CHECK_ERROR = 0;
  PetscLogEventRegister("Check Error",KSP_COOKIE,&CHECK_ERROR);
  /* - - - - - - - - - - - - Stage 0: - - - - - - - - - - - - - -
                        Preliminary Setup
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
  PetscLogStagePush(stages[0]);
  /*
     Create data structures for first linear system.
      - Create parallel matrix, specifying only its global dimensions.
        When using MatCreate(), the matrix format can be specified at
        runtime.  Also, the parallel partitioning of the matrix is
        determined by PETSc at runtime.
      - Create parallel vectors.
        When using VecSetSizes(), we specify only the vector's global
        dimension; the parallel partitioning is determined at runtime.
      - Note: We form 1 vector from scratch and then duplicate as needed.
  */
  MatCreate(PETSC_COMM_WORLD,&C1);
  MatSetSizes(C1,PETSC_DECIDE,PETSC_DECIDE,m*n,m*n);
  MatSetFromOptions(C1);
  MatGetOwnershipRange(C1,&Istart,&Iend);
  VecCreate(PETSC_COMM_WORLD,&u);
  VecSetSizes(u,PETSC_DECIDE,m*n);
  VecSetFromOptions(u);
  VecDuplicate(u,&b1);
  VecDuplicate(u,&x1);
  /*
     Create first linear solver context.
     Set runtime options (e.g., -pc_type <type>).
     Note that the first linear system uses the default option
     names, while the second linear system uses a different
     options prefix.
  */
  KSPCreate(PETSC_COMM_WORLD,&ksp1);
  KSPSetFromOptions(ksp1);
  /*
     Set user-defined monitoring routine for first linear system.
  */
  PetscOptionsGetTruth(PETSC_NULL,"-my_ksp_monitor",&flg,PETSC_NULL);
  if (flg) {KSPMonitorSet(ksp1,MyKSPMonitor,PETSC_NULL,0);}
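  /*
     With the monitor registered, a run such as
         mpiexec -n 2 ./ex9 -my_ksp_monitor
     prints the current solution vector and residual norm from
     MyKSPMonitor() at every iteration of the first solver.
  */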
  /*
     Create data structures for second linear system.
  */
  MatCreate(PETSC_COMM_WORLD,&C2);
  MatSetSizes(C2,PETSC_DECIDE,PETSC_DECIDE,m*n,m*n);
  MatSetFromOptions(C2);
  MatGetOwnershipRange(C2,&Istart2,&Iend2);
  VecDuplicate(u,&b2);
  VecDuplicate(u,&x2);
  /*
     Create second linear solver context
  */
  KSPCreate(PETSC_COMM_WORLD,&ksp2);

  /*
     Set different options prefix for second linear system.
     Set runtime options (e.g., -s2_pc_type <type>)
  */
  KSPAppendOptionsPrefix(ksp2,"s2_");
  KSPSetFromOptions(ksp2);
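  /*
     Because of the "s2_" prefix, every standard KSP/PC option applies to
     this solver in prefixed form, e.g. -s2_ksp_type bcgs -s2_pc_type
     jacobi -s2_ksp_rtol 1e-8, independently of the options seen by ksp1.
  */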
  /*
     Assemble exact solution vector in parallel.  Note that each
     processor needs to set only its local part of the vector.
  */
  VecGetLocalSize(u,&ldim);
  VecGetOwnershipRange(u,&low,&high);
  for (i=0; i<ldim; i++) {
    iglobal = i + low;
    v = (PetscScalar)(i + 100*rank);
    VecSetValues(u,1,&iglobal,&v,ADD_VALUES);
  }
  VecAssemblyBegin(u);
  VecAssemblyEnd(u);
  /*
     Log the number of flops for computing vector entries
  */
  PetscLogFlops(2.0*ldim);
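  /*
     The count of 2 flops per local entry reflects the multiply and add
     in i + 100*rank above; PetscLogFlops() simply accumulates these
     user-computed flops into the current stage's totals.
  */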
  /*
     End current profiling stage
  */
  PetscLogStagePop();
  /* --------------------------------------------------------------
                        Linear solver loop:
      Solve 2 different linear systems several times in succession
     -------------------------------------------------------------- */
  for (t=0; t<ntimes; t++) {
    /* - - - - - - - - - - - - Stage 1: - - - - - - - - - - - - - -
                 Assemble and solve first linear system
       - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

    /*
       Begin profiling stage #1
    */
    PetscLogStagePush(stages[1]);

    /*
       Initialize all matrix entries to zero.  MatZeroEntries() retains
       the nonzero structure of the matrix for sparse formats.
    */
    if (t > 0) {MatZeroEntries(C1);}
    /*
       Set matrix entries in parallel.  Also, log the number of flops
       for computing matrix entries.
        - Each processor needs to insert only elements that it owns
          locally (but any non-local elements will be sent to the
          appropriate processor during matrix assembly).
        - Always specify global row and column indices of matrix entries.
    */
    for (Ii=Istart; Ii<Iend; Ii++) {
      v = -1.0; i = Ii/n; j = Ii - i*n;
      if (i>0)   {J = Ii - n; MatSetValues(C1,1,&Ii,1,&J,&v,ADD_VALUES);}
      if (i<m-1) {J = Ii + n; MatSetValues(C1,1,&Ii,1,&J,&v,ADD_VALUES);}
      if (j>0)   {J = Ii - 1; MatSetValues(C1,1,&Ii,1,&J,&v,ADD_VALUES);}
      if (j<n-1) {J = Ii + 1; MatSetValues(C1,1,&Ii,1,&J,&v,ADD_VALUES);}
      v = 4.0; MatSetValues(C1,1,&Ii,1,&Ii,&v,ADD_VALUES);
    }
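    /*
       The loop above assembles the standard 5-point stencil on an m x n
       grid: global row Ii corresponds to grid point (i,j) via Ii = i*n + j,
       each existing neighbor contributes -1 off the diagonal, and the
       diagonal entry is 4.
    */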
    for (Ii=Istart; Ii<Iend; Ii++) { /* Make matrix nonsymmetric */
      v = -1.0*(t+0.5); i = Ii/n;
      if (i>0) {J = Ii - n; MatSetValues(C1,1,&Ii,1,&J,&v,ADD_VALUES);}
    }
    PetscLogFlops(2.0*(Iend-Istart));
    /*
       Assemble matrix, using the 2-step process:
         MatAssemblyBegin(), MatAssemblyEnd()
       Computations can be done while messages are in transition
       by placing code between these two statements.
    */
    MatAssemblyBegin(C1,MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(C1,MAT_FINAL_ASSEMBLY);
    /*
       Indicate same nonzero structure of successive linear system matrices
    */
    MatSetOption(C1,MAT_NEW_NONZERO_LOCATIONS,PETSC_FALSE);
    /*
       Compute right-hand-side vector
    */
    MatMult(C1,u,b1);
    /*
       Set operators.  Here the matrix that defines the linear system
       also serves as the preconditioning matrix.
        - The flag SAME_NONZERO_PATTERN indicates that the
          preconditioning matrix has identical nonzero structure
          as during the last linear solve (although the values of
          the entries have changed).  Thus, we can save some
          work in setting up the preconditioner (e.g., no need to
          redo symbolic factorization for ILU/ICC preconditioners).
        - If the nonzero structure of the matrix is different during
          the second linear solve, then the flag DIFFERENT_NONZERO_PATTERN
          must be used instead.  If you are unsure whether the
          matrix structure has changed or not, use the flag
          DIFFERENT_NONZERO_PATTERN.
        - Caution: If you specify SAME_NONZERO_PATTERN, PETSc
          believes your assertion and does not check the structure
          of the matrix.  If you erroneously claim that the structure
          is the same when it actually is not, the new preconditioner
          will not function correctly.  Thus, use this optimization
          feature with caution!
    */
    KSPSetOperators(ksp1,C1,C1,SAME_NONZERO_PATTERN);
    /*
       Use the previous solution of linear system #1 as the initial
       guess for the next solve of linear system #1.  The user MUST
       call KSPSetInitialGuessNonzero() to indicate use of an initial
       guess vector; otherwise, an initial guess of zero is used.
    */
    if (t>0) {
      KSPSetInitialGuessNonzero(ksp1,PETSC_TRUE);
    }
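    /*
       Without this call, KSPSolve() zeroes x1 before iterating; with it,
       whatever x1 currently holds (here, the previous solve's solution)
       becomes the starting guess, which typically reduces the iteration
       count when successive systems are close to one another.
    */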
    /*
       Solve the first linear system.  Here we explicitly call
       KSPSetUp() for more detailed performance monitoring of
       certain preconditioners, such as ICC and ILU.  This call
       is optional, as KSPSetUp() will automatically be called
       within KSPSolve() if it hasn't been called already.
    */
    KSPSetUp(ksp1);
    KSPSolve(ksp1,b1,x1);
    KSPGetIterationNumber(ksp1,&its);
    /*
       Check error of solution to first linear system
    */
    CheckError(u,x1,b1,its,CHECK_ERROR);
    /* - - - - - - - - - - - - Stage 2: - - - - - - - - - - - - - -
                 Assemble and solve second linear system
       - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

    /*
       Conclude profiling stage #1; begin profiling stage #2
    */
    PetscLogStagePop();
    PetscLogStagePush(stages[2]);

    /*
       Initialize all matrix entries to zero
    */
    if (t > 0) {MatZeroEntries(C2);}
    /*
       Assemble matrix in parallel.  Also, log the number of flops
       for computing matrix entries.
        - To illustrate the features of parallel matrix assembly, we
          intentionally set the values differently from the way in
          which the matrix is distributed across the processors.  Each
          entry that is not owned locally will be sent to the appropriate
          processor during MatAssemblyBegin() and MatAssemblyEnd().
        - For best efficiency the user should strive to set as many
          entries locally as possible.
    */
    for (i=0; i<m; i++) {
      for (j=2*rank; j<2*rank+2; j++) {
        v = -1.0; Ii = j + n*i;
        if (i>0)   {J = Ii - n; MatSetValues(C2,1,&Ii,1,&J,&v,ADD_VALUES);}
        if (i<m-1) {J = Ii + n; MatSetValues(C2,1,&Ii,1,&J,&v,ADD_VALUES);}
        if (j>0)   {J = Ii - 1; MatSetValues(C2,1,&Ii,1,&J,&v,ADD_VALUES);}
        if (j<n-1) {J = Ii + 1; MatSetValues(C2,1,&Ii,1,&J,&v,ADD_VALUES);}
        v = 6.0 + t*0.5; MatSetValues(C2,1,&Ii,1,&Ii,&v,ADD_VALUES);
      }
    }
    for (Ii=Istart2; Ii<Iend2; Ii++) { /* Make matrix nonsymmetric */
      v = -1.0*(t+0.5); i = Ii/n;
      if (i>0) {J = Ii - n; MatSetValues(C2,1,&Ii,1,&J,&v,ADD_VALUES);}
    }
    MatAssemblyBegin(C2,MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(C2,MAT_FINAL_ASSEMBLY);
    PetscLogFlops(2.0*(Iend2-Istart2));
    /*
       Indicate same nonzero structure of successive linear system matrices
    */
    MatSetOption(C2,MAT_NEW_NONZERO_LOCATIONS,PETSC_FALSE);
    /*
       Compute right-hand-side vector
    */
    MatMult(C2,u,b2);
    /*
       Set operators.  Here the matrix that defines the linear system
       also serves as the preconditioning matrix.  Indicate same nonzero
       structure of successive preconditioner matrices by setting the
       flag SAME_NONZERO_PATTERN.
    */
    KSPSetOperators(ksp2,C2,C2,SAME_NONZERO_PATTERN);

    /*
       Solve the second linear system
    */
    KSPSetUp(ksp2);
    KSPSolve(ksp2,b2,x2);
    KSPGetIterationNumber(ksp2,&its);
    /*
       Check error of solution to second linear system
    */
    CheckError(u,x2,b2,its,CHECK_ERROR);

    /*
       Conclude profiling stage #2
    */
    PetscLogStagePop();
  }
  /* --------------------------------------------------------------
                       End of linear solver loop
     -------------------------------------------------------------- */
  /*
     Free work space.  All PETSc objects should be destroyed when they
     are no longer needed.
  */
  KSPDestroy(ksp1); KSPDestroy(ksp2);
  VecDestroy(x1);   VecDestroy(x2);
  VecDestroy(b1);   VecDestroy(b2);
  MatDestroy(C1);   MatDestroy(C2);
  VecDestroy(u);

  PetscFinalize();
  return 0;
}
/* ------------------------------------------------------------- */
/*
   CheckError - Checks the error of the solution.

   Input Parameters:
   u           - exact solution
   x           - approximate solution
   b           - work vector
   its         - number of iterations for convergence
   CHECK_ERROR - the event number for error checking
                 (for use with profiling)

   Notes:
   In order to profile this section of code separately from the
   rest of the program, we register it as an "event" with
   PetscLogEventRegister() in the main program.  Then, we indicate
   the start and end of this event by respectively calling
        PetscLogEventBegin(CHECK_ERROR,u,x,b,0);
        PetscLogEventEnd(CHECK_ERROR,u,x,b,0);
   Here, we specify the objects most closely associated with
   the event (the vectors u, x, and b).  Such information is optional;
   we could instead just use 0 for all objects.
*/
PetscErrorCode CheckError(Vec u,Vec x,Vec b,PetscInt its,PetscLogEvent CHECK_ERROR)
{
  PetscScalar none = -1.0;
  PetscReal   norm;

  PetscLogEventBegin(CHECK_ERROR,u,x,b,0);

  /*
     Compute error of the solution, using b as a work vector.
  */
  VecCopy(x,b);
  VecAXPY(b,none,u);
  VecNorm(b,NORM_2,&norm);
  PetscPrintf(PETSC_COMM_WORLD,"Norm of error %A, Iterations %D\n",norm,its);
  PetscLogEventEnd(CHECK_ERROR,u,x,b,0);
  return 0;
}
/* ------------------------------------------------------------- */
/*
   MyKSPMonitor - This is a user-defined routine for monitoring
   the KSP iterative solvers.

   Input Parameters:
   ksp   - iterative context
   n     - iteration number
   rnorm - 2-norm (preconditioned) residual value (may be estimated)
   dummy - optional user-defined monitor context (unused here)
*/
PetscErrorCode MyKSPMonitor(KSP ksp,PetscInt n,PetscReal rnorm,void *dummy)
{
  Vec x;

  /*
     Build the solution vector
  */
  KSPBuildSolution(ksp,PETSC_NULL,&x);

  /*
     Write the solution vector and residual norm to stdout.
      - PetscPrintf() handles output for multiprocessor jobs
        by printing from only one processor in the communicator.
      - The parallel viewer PETSC_VIEWER_STDOUT_WORLD handles
        data from multiple processors so that the output
        is not jumbled.
  */
  PetscPrintf(PETSC_COMM_WORLD,"iteration %D solution vector:\n",n);
  VecView(x,PETSC_VIEWER_STDOUT_WORLD);
  PetscPrintf(PETSC_COMM_WORLD,"iteration %D KSP Residual norm %14.12e \n",n,rnorm);
  return 0;
}