Actual source code: ex13f90.F

!
!
!/*T
!   Concepts: KSP^basic sequential example
!   Concepts: KSP^Laplacian, 2d
!   Concepts: Laplacian, 2d
!   Processors: 1
!T*/
! -----------------------------------------------------------------------

      program main
      implicit none

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                    Include files
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!
!  The following include statements are required for KSP Fortran programs:
!     petscsys.h  - base PETSc routines
!     petscvec.h  - vectors
!     petscmat.h  - matrices
!     petscksp.h  - Krylov subspace methods
!     petscpc.h   - preconditioners
!
#include "finclude/petscsys.h"
#include "finclude/petscvec.h"
#include "finclude/petscmat.h"
#include "finclude/petscksp.h"
#include "finclude/petscpc.h"

!    User-defined context that contains all the data structures used
!    in the linear solution process.

!   Vec    x,b      /* solution vector and right hand side vector */
!   Mat    A        /* sparse matrix */
!   KSP    ksp      /* linear solver context */
!   int    m,n      /* grid dimensions */
!
!   Since we cannot store Scalars and integers in the same context,
!   we store the integers/pointers in the user-defined context and
!   carry the scalar values in a common block.  The scalar values in
!   this simplistic example could easily be recalculated in each
!   routine where they are needed.
!
!   Scalar hx2,hy2  /* (m+1)*(m+1) and (n+1)*(n+1), i.e., 1/(hx*hx)
!                      and 1/(hy*hy) for the grid spacings hx and hy */

!  Note: Any user-defined Fortran routines MUST be declared as external.

      external UserInitializeLinearSolver
      external UserFinalizeLinearSolver
      external UserDoLinearSolver

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                   Variable declarations
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

      PetscScalar  hx,hy,x,y
      PetscFortranAddr userctx(6)
      PetscErrorCode ierr
      PetscInt m,n,t,tmax,i,j
      PetscTruth flg
      PetscMPIInt size,rank
      double precision enorm
      PetscScalar,ALLOCATABLE :: userx(:,:)
      PetscScalar,ALLOCATABLE :: userb(:,:)
      PetscScalar,ALLOCATABLE :: solution(:,:)
      PetscScalar,ALLOCATABLE :: rho(:,:)

      double precision hx2,hy2
      common /param/ hx2,hy2

      tmax = 2
      m = 6
      n = 7

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                 Beginning of program
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

      call PetscInitialize(PETSC_NULL_CHARACTER,ierr)
      call MPI_Comm_size(PETSC_COMM_WORLD,size,ierr)
      if (size .ne. 1) then
         call MPI_Comm_rank(PETSC_COMM_WORLD,rank,ierr)
         if (rank .eq. 0) then
            write(6,*) 'This is a uniprocessor example only!'
         endif
         SETERRQ(1,' ',ierr)
      endif

!  The next two lines are for testing only; they allow the user to
!  set the grid size at runtime.

      call PetscOptionsGetInt(PETSC_NULL_CHARACTER,'-m',m,flg,ierr)
      call PetscOptionsGetInt(PETSC_NULL_CHARACTER,'-n',n,flg,ierr)
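
!  For example, assuming the executable is named ex13f90, running
!      ./ex13f90 -m 80 -n 60
!  replaces the default 6 x 7 grid with an 80 x 60 one.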

!  Create the empty sparse matrix and linear solver data structures

      call UserInitializeLinearSolver(m,n,userctx,ierr)

!  Allocate arrays to hold the solution to the linear system.  This
!  is not normally done in PETSc programs, but here we are calling
!  these routines from a non-PETSc program and would like to reuse
!  data structures from another code.  In the context of a larger
!  application, these arrays would be provided by other (non-PETSc)
!  parts of the application code.

      ALLOCATE (userx(m,n),userb(m,n),solution(m,n))

!  Allocate an array to hold the coefficients in the elliptic operator

      ALLOCATE (rho(m,n))

!  Fill up the array rho with the function rho(x,y) = x; fill the
!  right-hand side b and the solution with a known problem for testing.
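!
!  Specifically, with the manufactured solution
!      psi(x,y) = sin(2*pi*x)*sin(2*pi*y)
!  and rho(x,y) = x, the product rule gives the forcing term set below:
!      -div(rho grad psi) = -2*pi*cos(2*pi*x)*sin(2*pi*y)
!                           + 8*pi*pi*x*sin(2*pi*x)*sin(2*pi*y)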

      hx = 1.0/(m+1)
      hy = 1.0/(n+1)
      y  = hy
      do 20 j=1,n
         x = hx
         do 10 i=1,m
            rho(i,j)      = x
            solution(i,j) = sin(2.*PETSC_PI*x)*sin(2.*PETSC_PI*y)
            userb(i,j)    = -2.*PETSC_PI*cos(2.*PETSC_PI*x)*              &
     &                sin(2.*PETSC_PI*y) +                                &
     &                8*PETSC_PI*PETSC_PI*x*                              &
     &                sin(2.*PETSC_PI*x)*sin(2.*PETSC_PI*y)
            x = x + hx
 10      continue
         y = y + hy
 20   continue

!  Loop over a number of timesteps, setting up and solving the linear
!  system at each time step.
!  Note that this loop is somewhat artificial. It is intended to
!  demonstrate how one may reuse the linear solvers in each time step.

      do 100 t=1,tmax
         call UserDoLinearSolver(rho,userctx,userb,userx,ierr)

!        Compute the error. Note that this could (and usually should)
!        all be done using the PETSc vector operations. Here we use
!        more standard programming practices to show how they may be
!        mixed with PETSc.
         enorm = 0.0
         do 90 j=1,n
            do 80 i=1,m
               enorm = enorm +                                          &
     &             (solution(i,j)-userx(i,j))*(solution(i,j)-userx(i,j))
 80         continue
 90      continue
         enorm = enorm * PetscRealPart(hx*hy)
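!        (enorm here is the square of the discrete L2 error norm; no
!        square root is taken before it is printed.)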
         write(6,115) m,n,enorm
 115     format ('m = ',I2,' n = ',I2,' error norm = ',1PE10.4)
 100  continue

!  We are finished solving linear systems, so we clean up the
!  data structures.

      DEALLOCATE (userx,userb,solution,rho)

      call UserFinalizeLinearSolver(userctx,ierr)
      call PetscFinalize(ierr)
      end

! ----------------------------------------------------------------
      subroutine UserInitializeLinearSolver(m,n,userctx,ierr)

      implicit none

#include "finclude/petscsys.h"
#include "finclude/petscvec.h"
#include "finclude/petscmat.h"
#include "finclude/petscksp.h"
#include "finclude/petscpc.h"

      PetscInt m,n
      PetscErrorCode ierr
      PetscFortranAddr userctx(*)

      common /param/ hx2,hy2
      double precision hx2,hy2

!  Local variable declarations
      Mat     A
      Vec     b,x
      KSP     ksp
      PetscInt Ntot


!  Here we assume use of a grid of size m x n, with all points on the
!  interior of the domain, i.e., we do not include the points corresponding
!  to homogeneous Dirichlet boundary conditions.  We assume that the domain
!  is [0,1]x[0,1].

      hx2 = (m+1)*(m+1)
      hy2 = (n+1)*(n+1)
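
!  hx2 and hy2 are the inverse squares of the grid spacings used in the
!  stencil below: since hx = 1/(m+1), hx2 = (m+1)*(m+1) = 1/(hx*hx),
!  and similarly for hy2.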
      Ntot = m*n

!  Create the sparse matrix. Preallocate 5 nonzeros per row: one for
!  the diagonal and one for each of the four stencil neighbors.

      call MatCreateSeqAIJ(PETSC_COMM_SELF,Ntot,Ntot,5,                  &
     &     PETSC_NULL_INTEGER,A,ierr)
!
!  Create vectors. Here we create vectors with no memory allocated
!  (PETSC_NULL_SCALAR in place of an array). This way, we can use the
!  data structures already in the program by calling VecPlaceArray()
!  at a later stage.
!
      call VecCreateSeqWithArray(PETSC_COMM_SELF,Ntot,                  &
     &     PETSC_NULL_SCALAR,b,ierr)
      call VecDuplicate(b,x,ierr)

!  Create linear solver context. This will be used repeatedly for all
!  the linear solves needed.

      call KSPCreate(PETSC_COMM_SELF,ksp,ierr)

      userctx(1) = x
      userctx(2) = b
      userctx(3) = A
      userctx(4) = ksp
      userctx(5) = m
      userctx(6) = n
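
!  These slots are unpacked in UserDoLinearSolver() and
!  UserFinalizeLinearSolver(); the userctx array is how the PETSc
!  objects are passed between the user routines.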

      return
      end
! -----------------------------------------------------------------------

!   Solves -div (rho grad psi) = F using finite differences.
!   rho is a 2-dimensional array of size m by n, stored in Fortran
!   style by columns. userb is a standard one-dimensional array.

      subroutine UserDoLinearSolver(rho,userctx,userb,userx,ierr)

      implicit none

#include "finclude/petscsys.h"
#include "finclude/petscvec.h"
#include "finclude/petscmat.h"
#include "finclude/petscksp.h"
#include "finclude/petscpc.h"

      PetscErrorCode ierr
      PetscFortranAddr userctx(*)
      PetscScalar rho(*),userb(*),userx(*)


      common /param/ hx2,hy2
      double precision hx2,hy2

      PC   pc
      KSP  ksp
      Vec  b,x
      Mat  A
      PetscInt m,n,one
      PetscInt i,j,II,JJ
      PetscScalar  v

!      PetscScalar tmpx(*),tmpb(*)

      one  = 1
      x    = userctx(1)
      b    = userctx(2)
      A    = userctx(3)
      ksp  = userctx(4)
      m    = int(userctx(5))
      n    = int(userctx(6))

!  This is not the most efficient way of generating the matrix,
!  but let's not worry about it. We should have separate code for
!  the four corners, each edge, and the interior; then we would not
!  have the slow if-tests inside the loop.
!
!  Compute the operator
!          -div rho grad
!  on an m by n grid with zero Dirichlet boundary conditions. The rho
!  is assumed to be given on the same grid as the finite difference
!  stencil is applied.  For a staggered grid, one would have to change
!  things slightly.

      II = 0
      do 110 j=1,n
         do 100 i=1,m
            if (j .gt. 1) then
               JJ = II - m
               v = -0.5*(rho(II+1) + rho(JJ+1))*hy2
               call MatSetValues(A,one,II,one,JJ,v,INSERT_VALUES,ierr)
            endif
            if (j .lt. n) then
               JJ = II + m
               v = -0.5*(rho(II+1) + rho(JJ+1))*hy2
               call MatSetValues(A,one,II,one,JJ,v,INSERT_VALUES,ierr)
            endif
            if (i .gt. 1) then
               JJ = II - 1
               v = -0.5*(rho(II+1) + rho(JJ+1))*hx2
               call MatSetValues(A,one,II,one,JJ,v,INSERT_VALUES,ierr)
            endif
            if (i .lt. m) then
               JJ = II + 1
               v = -0.5*(rho(II+1) + rho(JJ+1))*hx2
               call MatSetValues(A,one,II,one,JJ,v,INSERT_VALUES,ierr)
            endif
            v = 2*rho(II+1)*(hx2+hy2)
            call MatSetValues(A,one,II,one,II,v,INSERT_VALUES,ierr)
            II = II+1
 100     continue
 110  continue
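
!     For an interior node the loop above has produced the standard
!     five-point stencil (W/E denote II-1/II+1 and S/N denote II-m/II+m;
!     rho(II+1) is the Fortran 1-based entry for the 0-based PETSc row
!     index II):
!
!                       -0.5*(rho_II+rho_N)*hy2
!     -0.5*(rho_II+rho_W)*hx2  2*rho_II*(hx2+hy2)  -0.5*(rho_II+rho_E)*hx2
!                       -0.5*(rho_II+rho_S)*hy2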
!
!     Assemble matrix
!
      call MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY,ierr)
      call MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY,ierr)

!
!     Set operators. Here the matrix that defines the linear system
!     also serves as the preconditioning matrix. All the matrices here
!     have the same nonzero pattern, and we indicate this so the
!     linear solvers can take advantage of it.
!
      call KSPSetOperators(ksp,A,A,SAME_NONZERO_PATTERN,ierr)

!
!     Set linear solver defaults for this problem (optional).
!     - Here we set it to use direct LU factorization for the solution.
!
      call KSPGetPC(ksp,pc,ierr)
      call PCSetType(pc,PCLU,ierr)

!
!     Set runtime options, e.g.,
!        -ksp_type <type> -pc_type <type> -ksp_monitor -ksp_rtol <rtol>
!     These options will override those specified above as long as
!     KSPSetFromOptions() is called _after_ any other customization
!     routines.
!
!     Run the program with the option -help to see all the possible
!     linear solver options.
!
      call KSPSetFromOptions(ksp,ierr)
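
!     For example, assuming the executable is named ex13f90,
!         ./ex13f90 -ksp_type gmres -pc_type jacobi -ksp_monitor
!     replaces the LU preconditioner set above with Jacobi-preconditioned
!     GMRES and prints the residual norm at each iteration.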

!
!     This allows the PETSc linear solvers to compute the solution
!     directly in the user's array rather than in the PETSc vector.
!
!     This is essentially a hack and is not highly recommended unless
!     you are quite comfortable with using PETSc. In general, users
!     should write their entire application using PETSc vectors rather
!     than arrays.
!
      call VecPlaceArray(x,userx,ierr)
      call VecPlaceArray(b,userb,ierr)

! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
!                      Solve the linear system
! - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -

      call KSPSolve(ksp,b,x,ierr)

      call VecResetArray(x,ierr)
      call VecResetArray(b,ierr)
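
!     Resetting the arrays detaches userx and userb from the PETSc
!     vectors, so fresh arrays can be placed on the same x and b the
!     next time this routine is called.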

      return
      end

! ------------------------------------------------------------------------

      subroutine UserFinalizeLinearSolver(userctx,ierr)

      implicit none

#include "finclude/petscsys.h"
#include "finclude/petscvec.h"
#include "finclude/petscmat.h"
#include "finclude/petscksp.h"
#include "finclude/petscpc.h"

      PetscErrorCode ierr
      PetscFortranAddr userctx(*)

!  Local variable declarations

      Vec  x,b
      Mat  A
      KSP  ksp
!
!     We are all done and don't need to solve any more linear systems, so
!     we free the work space.  All PETSc objects should be destroyed when
!     they are no longer needed.
!
      x    = userctx(1)
      b    = userctx(2)
      A    = userctx(3)
      ksp  = userctx(4)

      call VecDestroy(x,ierr)
      call VecDestroy(b,ierr)
      call MatDestroy(A,ierr)
      call KSPDestroy(ksp,ierr)

      return
      end