Actual source code: theta.c
1: /*
2: Code for timestepping with implicit Theta method
3: */
4: #include <petsc/private/tsimpl.h>
5: #include <petscsnes.h>
6: #include <petscdm.h>
7: #include <petscmat.h>
9: typedef struct {
10: /* context for time stepping */
11: PetscReal stage_time;
12: Vec Stages[2]; /* Storage for stage solutions */
13: Vec X0,X,Xdot; /* Storage for u^n, u^n + dt a_{11} k_1, and time derivative u^{n+1}_t */
14: Vec affine; /* Affine vector needed for residual at beginning of step in endpoint formulation */
15: PetscReal Theta;
16: PetscReal shift; /* Shift parameter for SNES Jacobian, used by forward, TLM and adjoint */
17: PetscInt order;
18: PetscBool endpoint;
19: PetscBool extrapolate;
20: TSStepStatus status;
21: Vec VecCostIntegral0; /* Backup for roll-backs due to events, used by cost integral */
22: PetscReal ptime0; /* Backup for ts->ptime, the start time of current time step, used by TLM and cost integral */
23: PetscReal time_step0; /* Backup for ts->timestep, the step size of current time step, used by TLM and cost integral*/
25: /* context for sensitivity analysis */
26: PetscInt num_tlm; /* Total number of tangent linear equations */
27: Vec *VecsDeltaLam; /* Increment of the adjoint sensitivity w.r.t IC at stage */
28: Vec *VecsDeltaMu; /* Increment of the adjoint sensitivity w.r.t P at stage */
29: Vec *VecsSensiTemp; /* Vector to be multiplied with Jacobian transpose */
30: Mat MatFwdStages[2]; /* TLM Stages */
31: Mat MatDeltaFwdSensip; /* Increment of the forward sensitivity at stage */
32: Vec VecDeltaFwdSensipCol; /* Working vector for holding one column of the sensitivity matrix */
33: Mat MatFwdSensip0; /* backup for roll-backs due to events */
34: Mat MatIntegralSensipTemp; /* Working vector for forward integral sensitivity */
35: Mat MatIntegralSensip0; /* backup for roll-backs due to events */
36: Vec *VecsDeltaLam2; /* Increment of the 2nd-order adjoint sensitivity w.r.t IC at stage */
37: Vec *VecsDeltaMu2; /* Increment of the 2nd-order adjoint sensitivity w.r.t P at stage */
38: Vec *VecsSensi2Temp; /* Working vectors that hold the residual for the second-order adjoint */
39: Vec *VecsAffine; /* Working vectors to store residuals */
40: /* context for error estimation */
41: Vec vec_sol_prev;
42: Vec vec_lte_work;
43: } TS_Theta;
45: static PetscErrorCode TSThetaGetX0AndXdot(TS ts,DM dm,Vec *X0,Vec *Xdot)
46: {
47: TS_Theta *th = (TS_Theta*)ts->data;
51: if (X0) {
52: if (dm && dm != ts->dm) {
53: DMGetNamedGlobalVector(dm,"TSTheta_X0",X0);
54: } else *X0 = ts->vec_sol;
55: }
56: if (Xdot) {
57: if (dm && dm != ts->dm) {
58: DMGetNamedGlobalVector(dm,"TSTheta_Xdot",Xdot);
59: } else *Xdot = th->Xdot;
60: }
61: return(0);
62: }
64: static PetscErrorCode TSThetaRestoreX0AndXdot(TS ts,DM dm,Vec *X0,Vec *Xdot)
65: {
69: if (X0) {
70: if (dm && dm != ts->dm) {
71: DMRestoreNamedGlobalVector(dm,"TSTheta_X0",X0);
72: }
73: }
74: if (Xdot) {
75: if (dm && dm != ts->dm) {
76: DMRestoreNamedGlobalVector(dm,"TSTheta_Xdot",Xdot);
77: }
78: }
79: return(0);
80: }
82: static PetscErrorCode DMCoarsenHook_TSTheta(DM fine,DM coarse,void *ctx)
83: {
85: return(0);
86: }
88: static PetscErrorCode DMRestrictHook_TSTheta(DM fine,Mat restrct,Vec rscale,Mat inject,DM coarse,void *ctx)
89: {
90: TS ts = (TS)ctx;
92: Vec X0,Xdot,X0_c,Xdot_c;
95: TSThetaGetX0AndXdot(ts,fine,&X0,&Xdot);
96: TSThetaGetX0AndXdot(ts,coarse,&X0_c,&Xdot_c);
97: MatRestrict(restrct,X0,X0_c);
98: MatRestrict(restrct,Xdot,Xdot_c);
99: VecPointwiseMult(X0_c,rscale,X0_c);
100: VecPointwiseMult(Xdot_c,rscale,Xdot_c);
101: TSThetaRestoreX0AndXdot(ts,fine,&X0,&Xdot);
102: TSThetaRestoreX0AndXdot(ts,coarse,&X0_c,&Xdot_c);
103: return(0);
104: }
106: static PetscErrorCode DMSubDomainHook_TSTheta(DM dm,DM subdm,void *ctx)
107: {
109: return(0);
110: }
112: static PetscErrorCode DMSubDomainRestrictHook_TSTheta(DM dm,VecScatter gscat,VecScatter lscat,DM subdm,void *ctx)
113: {
114: TS ts = (TS)ctx;
116: Vec X0,Xdot,X0_sub,Xdot_sub;
119: TSThetaGetX0AndXdot(ts,dm,&X0,&Xdot);
120: TSThetaGetX0AndXdot(ts,subdm,&X0_sub,&Xdot_sub);
122: VecScatterBegin(gscat,X0,X0_sub,INSERT_VALUES,SCATTER_FORWARD);
123: VecScatterEnd(gscat,X0,X0_sub,INSERT_VALUES,SCATTER_FORWARD);
125: VecScatterBegin(gscat,Xdot,Xdot_sub,INSERT_VALUES,SCATTER_FORWARD);
126: VecScatterEnd(gscat,Xdot,Xdot_sub,INSERT_VALUES,SCATTER_FORWARD);
128: TSThetaRestoreX0AndXdot(ts,dm,&X0,&Xdot);
129: TSThetaRestoreX0AndXdot(ts,subdm,&X0_sub,&Xdot_sub);
130: return(0);
131: }
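/*
  Quadrature update for the cost integral, as a summary of the code below (not a separate implementation):
    endpoint form:  q_{n+1} = q_n + dt*(1-Theta)*r(t_n, u_n) + dt*Theta*r(t_{n+1}, u_{n+1})
    midpoint form:  q_{n+1} = q_n + dt*r(t_n + Theta*dt, X)          (X is the stage value th->X)
  where r is the RHS function of the quadrature TS and dt is th->time_step0.
*/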
133: static PetscErrorCode TSThetaEvaluateCostIntegral(TS ts)
134: {
135: TS_Theta *th = (TS_Theta*)ts->data;
136: TS quadts = ts->quadraturets;
140: if (th->endpoint) {
141: /* Evolve ts->vec_costintegral to compute integrals */
142: if (th->Theta!=1.0) {
143: TSComputeRHSFunction(quadts,th->ptime0,th->X0,ts->vec_costintegrand);
144: VecAXPY(quadts->vec_sol,th->time_step0*(1.0-th->Theta),ts->vec_costintegrand);
145: }
146: TSComputeRHSFunction(quadts,ts->ptime,ts->vec_sol,ts->vec_costintegrand);
147: VecAXPY(quadts->vec_sol,th->time_step0*th->Theta,ts->vec_costintegrand);
148: } else {
149: TSComputeRHSFunction(quadts,th->stage_time,th->X,ts->vec_costintegrand);
150: VecAXPY(quadts->vec_sol,th->time_step0,ts->vec_costintegrand);
151: }
152: return(0);
153: }
155: static PetscErrorCode TSForwardCostIntegral_Theta(TS ts)
156: {
157: TS_Theta *th = (TS_Theta*)ts->data;
158: TS quadts = ts->quadraturets;
162: /* backup cost integral */
163: VecCopy(quadts->vec_sol,th->VecCostIntegral0);
164: TSThetaEvaluateCostIntegral(ts);
165: return(0);
166: }
168: static PetscErrorCode TSAdjointCostIntegral_Theta(TS ts)
169: {
170: TS_Theta *th = (TS_Theta*)ts->data;
174: /* Like TSForwardCostIntegral(), the adjoint cost integral evaluation relies on ptime0 and time_step0. */
175: th->ptime0 = ts->ptime + ts->time_step;
176: th->time_step0 = -ts->time_step;
177: TSThetaEvaluateCostIntegral(ts);
178: return(0);
179: }
181: static PetscErrorCode TSTheta_SNESSolve(TS ts,Vec b,Vec x)
182: {
183: PetscInt nits,lits;
187: SNESSolve(ts->snes,b,x);
188: SNESGetIterationNumber(ts->snes,&nits);
189: SNESGetLinearSolveIterations(ts->snes,&lits);
190: ts->snes_its += nits; ts->ksp_its += lits;
191: return(0);
192: }
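/*
  Sketch of one step as implemented below (the endpoint form assumes a linear, time-independent
  mass matrix, see the comment inside the loop):
    midpoint form:  solve  F(t_n + Theta*dt, X, (X - X0)/(Theta*dt)) = 0  for the stage X,
                    then   u_{n+1} = u_n + dt*Xdot   with   Xdot = (X - X0)/(Theta*dt)
    endpoint form:  solve  F(t_n + dt, X, (X - X0)/(Theta*dt)) = (Theta-1)/Theta * F(t_n, u_n, 0)
                    (the right-hand side is the vector th->affine), then   u_{n+1} = X
*/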
194: static PetscErrorCode TSStep_Theta(TS ts)
195: {
196: TS_Theta *th = (TS_Theta*)ts->data;
197: PetscInt rejections = 0;
198: PetscBool stageok,accept = PETSC_TRUE;
199: PetscReal next_time_step = ts->time_step;
203: if (!ts->steprollback) {
204: if (th->vec_sol_prev) { VecCopy(th->X0,th->vec_sol_prev); }
205: VecCopy(ts->vec_sol,th->X0);
206: }
208: th->status = TS_STEP_INCOMPLETE;
209: while (!ts->reason && th->status != TS_STEP_COMPLETE) {
210: th->shift = 1/(th->Theta*ts->time_step);
211: th->stage_time = ts->ptime + (th->endpoint ? (PetscReal)1 : th->Theta)*ts->time_step;
212: VecCopy(th->X0,th->X);
213: if (th->extrapolate && !ts->steprestart) {
214: VecAXPY(th->X,1/th->shift,th->Xdot);
215: }
216: if (th->endpoint) { /* This formulation assumes linear time-independent mass matrix */
217: if (!th->affine) {VecDuplicate(ts->vec_sol,&th->affine);}
218: VecZeroEntries(th->Xdot);
219: TSComputeIFunction(ts,ts->ptime,th->X0,th->Xdot,th->affine,PETSC_FALSE);
220: VecScale(th->affine,(th->Theta-1)/th->Theta);
221: } else if (th->affine) { /* Just in case th->endpoint is changed between calls to TSStep_Theta() */
222: VecZeroEntries(th->affine);
223: }
224: TSPreStage(ts,th->stage_time);
225: TSTheta_SNESSolve(ts,th->affine,th->X);
226: TSPostStage(ts,th->stage_time,0,&th->X);
227: TSAdaptCheckStage(ts->adapt,ts,th->stage_time,th->X,&stageok);
228: if (!stageok) goto reject_step;
230: th->status = TS_STEP_PENDING;
231: if (th->endpoint) {
232: VecCopy(th->X,ts->vec_sol);
233: } else {
234: VecAXPBYPCZ(th->Xdot,-th->shift,th->shift,0,th->X0,th->X);
235: VecAXPY(ts->vec_sol,ts->time_step,th->Xdot);
236: }
237: TSAdaptChoose(ts->adapt,ts,ts->time_step,NULL,&next_time_step,&accept);
238: th->status = accept ? TS_STEP_COMPLETE : TS_STEP_INCOMPLETE;
239: if (!accept) {
240: VecCopy(th->X0,ts->vec_sol);
241: ts->time_step = next_time_step;
242: goto reject_step;
243: }
245: if (ts->forward_solve || ts->costintegralfwd) { /* Save the info for the later use in cost integral evaluation */
246: th->ptime0 = ts->ptime;
247: th->time_step0 = ts->time_step;
248: }
249: ts->ptime += ts->time_step;
250: ts->time_step = next_time_step;
251: break;
253: reject_step:
254: ts->reject++; accept = PETSC_FALSE;
255: if (!ts->reason && ++rejections > ts->max_reject && ts->max_reject >= 0) {
256: ts->reason = TS_DIVERGED_STEP_REJECTED;
257: PetscInfo2(ts,"Step=%D, step rejections %D greater than current TS allowed, stopping solve\n",ts->steps,rejections);
258: }
259: }
260: return(0);
261: }
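/*
  Outline of the discrete-adjoint step for backward Euler (Theta = 1), matching the code below:
    1. assemble the RHS  lambda_{n+1}/h + r_u^T  (plus quadrature terms when present)
    2. assemble the SNES Jacobian with shift = 1/h and do one transposed solve per cost function
    3. re-assemble the Jacobian with shift = 0, flip its sign, and use it to update lambda_n (and
       the second-order adjoint); parameter sensitivities mu are then accumulated through the
       transposed IJacobianP
*/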
263: static PetscErrorCode TSAdjointStepBEuler_Private(TS ts)
264: {
265: TS_Theta *th = (TS_Theta*)ts->data;
266: TS quadts = ts->quadraturets;
267: Vec *VecsDeltaLam = th->VecsDeltaLam,*VecsDeltaMu = th->VecsDeltaMu,*VecsSensiTemp = th->VecsSensiTemp;
268: Vec *VecsDeltaLam2 = th->VecsDeltaLam2,*VecsDeltaMu2 = th->VecsDeltaMu2,*VecsSensi2Temp = th->VecsSensi2Temp;
269: PetscInt nadj;
270: Mat J,Jpre,quadJ = NULL,quadJp = NULL;
271: KSP ksp;
272: PetscScalar *xarr;
273: TSEquationType eqtype;
274: PetscBool isexplicitode = PETSC_FALSE;
275: PetscReal adjoint_time_step;
279: TSGetEquationType(ts,&eqtype);
280: if (eqtype == TS_EQ_ODE_EXPLICIT) {
281: isexplicitode = PETSC_TRUE;
282: VecsDeltaLam = ts->vecs_sensi;
283: VecsDeltaLam2 = ts->vecs_sensi2;
284: }
285: th->status = TS_STEP_INCOMPLETE;
286: SNESGetKSP(ts->snes,&ksp);
287: TSGetIJacobian(ts,&J,&Jpre,NULL,NULL);
288: if (quadts) {
289: TSGetRHSJacobian(quadts,&quadJ,NULL,NULL,NULL);
290: TSGetRHSJacobianP(quadts,&quadJp,NULL,NULL);
291: }
293: th->stage_time = ts->ptime;
294: adjoint_time_step = -ts->time_step; /* always positive since time_step is negative */
296: /* Build RHS for first-order adjoint lambda_{n+1}/h + r_u^T(n+1) */
297: if (quadts) {
298: TSComputeRHSJacobian(quadts,th->stage_time,ts->vec_sol,quadJ,NULL);
299: }
301: for (nadj=0; nadj<ts->numcost; nadj++) {
302: VecCopy(ts->vecs_sensi[nadj],VecsSensiTemp[nadj]);
303: VecScale(VecsSensiTemp[nadj],1./adjoint_time_step); /* lambda_{n+1}/h */
304: if (quadJ) {
305: MatDenseGetColumn(quadJ,nadj,&xarr);
306: VecPlaceArray(ts->vec_drdu_col,xarr);
307: VecAXPY(VecsSensiTemp[nadj],1.,ts->vec_drdu_col);
308: VecResetArray(ts->vec_drdu_col);
309: MatDenseRestoreColumn(quadJ,&xarr);
310: }
311: }
313: /* Build LHS for first-order adjoint */
314: th->shift = 1./adjoint_time_step;
315: TSComputeSNESJacobian(ts,ts->vec_sol,J,Jpre);
316: KSPSetOperators(ksp,J,Jpre);
318: /* Solve stage equation LHS*lambda_s = RHS for first-order adjoint */
319: for (nadj=0; nadj<ts->numcost; nadj++) {
320: KSPConvergedReason kspreason;
321: KSPSolveTranspose(ksp,VecsSensiTemp[nadj],VecsDeltaLam[nadj]);
322: KSPGetConvergedReason(ksp,&kspreason);
323: if (kspreason < 0) {
324: ts->reason = TSADJOINT_DIVERGED_LINEAR_SOLVE;
325: PetscInfo2(ts,"Step=%D, %Dth cost function, transposed linear solve fails, stopping 1st-order adjoint solve\n",ts->steps,nadj);
326: }
327: }
329: if (ts->vecs_sensi2) { /* U_{n+1} */
330: /* Get w1 at t_{n+1} from TLM matrix */
331: MatDenseGetColumn(ts->mat_sensip,0,&xarr);
332: VecPlaceArray(ts->vec_sensip_col,xarr);
333: /* lambda_s^T F_UU w_1 */
334: TSComputeIHessianProductFunctionUU(ts,th->stage_time,ts->vec_sol,VecsDeltaLam,ts->vec_sensip_col,ts->vecs_fuu);
335: /* lambda_s^T F_UP w_2 */
336: TSComputeIHessianProductFunctionUP(ts,th->stage_time,ts->vec_sol,VecsDeltaLam,ts->vec_dir,ts->vecs_fup);
337: for (nadj=0; nadj<ts->numcost; nadj++) { /* compute the residual */
338: VecCopy(ts->vecs_sensi2[nadj],VecsSensi2Temp[nadj]);
339: VecScale(VecsSensi2Temp[nadj],1./adjoint_time_step);
340: VecAXPY(VecsSensi2Temp[nadj],-1.,ts->vecs_fuu[nadj]);
341: if (ts->vecs_fup) {
342: VecAXPY(VecsSensi2Temp[nadj],-1.,ts->vecs_fup[nadj]);
343: }
344: }
345: /* Solve stage equation LHS X = RHS for second-order adjoint */
346: for (nadj=0; nadj<ts->numcost; nadj++) {
347: KSPConvergedReason kspreason;
348: KSPSolveTranspose(ksp,VecsSensi2Temp[nadj],VecsDeltaLam2[nadj]);
349: KSPGetConvergedReason(ksp,&kspreason);
350: if (kspreason < 0) {
351: ts->reason = TSADJOINT_DIVERGED_LINEAR_SOLVE;
352: PetscInfo2(ts,"Step=%D, %Dth cost function, transposed linear solve fails, stopping 2nd-order adjoint solve\n",ts->steps,nadj);
353: }
354: }
355: }
357: /* Update sensitivities, and evaluate integrals if there is any */
358: if (!isexplicitode) {
359: th->shift = 0.0;
360: TSComputeSNESJacobian(ts,ts->vec_sol,J,Jpre);
361: KSPSetOperators(ksp,J,Jpre);
362: MatScale(J,-1.);
363: for (nadj=0; nadj<ts->numcost; nadj++) {
364: /* Add f_U \lambda_s to the original RHS */
365: MatMultTransposeAdd(J,VecsDeltaLam[nadj],VecsSensiTemp[nadj],VecsSensiTemp[nadj]);
366: VecScale(VecsSensiTemp[nadj],adjoint_time_step);
367: VecCopy(VecsSensiTemp[nadj],ts->vecs_sensi[nadj]);
368: if (ts->vecs_sensi2) {
369: MatMultTransposeAdd(J,VecsDeltaLam2[nadj],VecsSensi2Temp[nadj],VecsSensi2Temp[nadj]);
370: VecScale(VecsSensi2Temp[nadj],adjoint_time_step);
371: VecCopy(VecsSensi2Temp[nadj],ts->vecs_sensi2[nadj]);
372: }
373: }
374: }
375: if (ts->vecs_sensip) {
376: VecAXPBYPCZ(th->Xdot,-1./adjoint_time_step,1.0/adjoint_time_step,0,th->X0,ts->vec_sol);
377: TSComputeIJacobianP(ts,th->stage_time,ts->vec_sol,th->Xdot,1./adjoint_time_step,ts->Jacp,PETSC_FALSE); /* get -f_p */
378: if (quadts) {
379: TSComputeRHSJacobianP(quadts,th->stage_time,ts->vec_sol,quadJp);
380: }
381: if (ts->vecs_sensi2p) {
382: /* lambda_s^T F_PU w_1 */
383: TSComputeIHessianProductFunctionPU(ts,th->stage_time,ts->vec_sol,VecsDeltaLam,ts->vec_sensip_col,ts->vecs_fpu);
384: /* lambda_s^T F_PP w_2 */
385: TSComputeIHessianProductFunctionPP(ts,th->stage_time,ts->vec_sol,VecsDeltaLam,ts->vec_dir,ts->vecs_fpp);
386: }
388: for (nadj=0; nadj<ts->numcost; nadj++) {
389: MatMultTranspose(ts->Jacp,VecsDeltaLam[nadj],VecsDeltaMu[nadj]);
390: VecAXPY(ts->vecs_sensip[nadj],-adjoint_time_step,VecsDeltaMu[nadj]);
391: if (quadJp) {
392: MatDenseGetColumn(quadJp,nadj,&xarr);
393: VecPlaceArray(ts->vec_drdp_col,xarr);
394: VecAXPY(ts->vecs_sensip[nadj],adjoint_time_step,ts->vec_drdp_col);
395: VecResetArray(ts->vec_drdp_col);
396: MatDenseRestoreColumn(quadJp,&xarr);
397: }
398: if (ts->vecs_sensi2p) {
399: MatMultTranspose(ts->Jacp,VecsDeltaLam2[nadj],VecsDeltaMu2[nadj]);
400: VecAXPY(ts->vecs_sensi2p[nadj],-adjoint_time_step,VecsDeltaMu2[nadj]);
401: if (ts->vecs_fpu) {
402: VecAXPY(ts->vecs_sensi2p[nadj],-adjoint_time_step,ts->vecs_fpu[nadj]);
403: }
404: if (ts->vecs_fpp) {
405: VecAXPY(ts->vecs_sensi2p[nadj],-adjoint_time_step,ts->vecs_fpp[nadj]);
406: }
407: }
408: }
409: }
411: if (ts->vecs_sensi2) {
412: VecResetArray(ts->vec_sensip_col);
413: MatDenseRestoreColumn(ts->mat_sensip,&xarr);
414: }
415: th->status = TS_STEP_COMPLETE;
416: return(0);
417: }
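/*
  General-Theta adjoint step; the Theta == 1 case is delegated to TSAdjointStepBEuler_Private().
  The endpoint form is handled as a two-stage method (an additional transposed multiply/update at
  t_n using th->X0), while the midpoint form performs a one-stage update around the stage value th->X.
*/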
419: static PetscErrorCode TSAdjointStep_Theta(TS ts)
420: {
421: TS_Theta *th = (TS_Theta*)ts->data;
422: TS quadts = ts->quadraturets;
423: Vec *VecsDeltaLam = th->VecsDeltaLam,*VecsDeltaMu = th->VecsDeltaMu,*VecsSensiTemp = th->VecsSensiTemp;
424: Vec *VecsDeltaLam2 = th->VecsDeltaLam2,*VecsDeltaMu2 = th->VecsDeltaMu2,*VecsSensi2Temp = th->VecsSensi2Temp;
425: PetscInt nadj;
426: Mat J,Jpre,quadJ = NULL,quadJp = NULL;
427: KSP ksp;
428: PetscScalar *xarr;
429: PetscReal adjoint_time_step;
430: PetscReal adjoint_ptime; /* end time of the adjoint time step (ts->ptime is the start time, usually ts->ptime is larger than adjoint_ptime) */
434: if (th->Theta == 1.) {
435: TSAdjointStepBEuler_Private(ts);
436: return(0);
437: }
438: th->status = TS_STEP_INCOMPLETE;
439: SNESGetKSP(ts->snes,&ksp);
440: TSGetIJacobian(ts,&J,&Jpre,NULL,NULL);
441: if (quadts) {
442: TSGetRHSJacobian(quadts,&quadJ,NULL,NULL,NULL);
443: TSGetRHSJacobianP(quadts,&quadJp,NULL,NULL);
444: }
445: /* If endpoint=1, ts->ptime and th->X0 will be used; if endpoint=0, th->stage_time and th->X will be used. */
446: th->stage_time = th->endpoint ? ts->ptime : (ts->ptime+(1.-th->Theta)*ts->time_step);
447: adjoint_ptime = ts->ptime + ts->time_step;
448: adjoint_time_step = -ts->time_step; /* always positive since time_step is negative */
450: if (!th->endpoint) {
451: /* recover th->X0 using vec_sol and the stage value th->X */
452: VecAXPBYPCZ(th->X0,1.0/(1.0-th->Theta),th->Theta/(th->Theta-1.0),0,th->X,ts->vec_sol);
453: }
455: /* Build RHS for first-order adjoint */
456: /* Cost function has an integral term */
457: if (quadts) {
458: if (th->endpoint) {
459: TSComputeRHSJacobian(quadts,th->stage_time,ts->vec_sol,quadJ,NULL);
460: } else {
461: TSComputeRHSJacobian(quadts,th->stage_time,th->X,quadJ,NULL);
462: }
463: }
465: for (nadj=0; nadj<ts->numcost; nadj++) {
466: VecCopy(ts->vecs_sensi[nadj],VecsSensiTemp[nadj]);
467: VecScale(VecsSensiTemp[nadj],1./(th->Theta*adjoint_time_step));
468: if (quadJ) {
469: MatDenseGetColumn(quadJ,nadj,&xarr);
470: VecPlaceArray(ts->vec_drdu_col,xarr);
471: VecAXPY(VecsSensiTemp[nadj],1.,ts->vec_drdu_col);
472: VecResetArray(ts->vec_drdu_col);
473: MatDenseRestoreColumn(quadJ,&xarr);
474: }
475: }
477: /* Build LHS for first-order adjoint */
478: th->shift = 1./(th->Theta*adjoint_time_step);
479: if (th->endpoint) {
480: TSComputeSNESJacobian(ts,ts->vec_sol,J,Jpre);
481: } else {
482: TSComputeSNESJacobian(ts,th->X,J,Jpre);
483: }
484: KSPSetOperators(ksp,J,Jpre);
486: /* Solve stage equation LHS*lambda_s = RHS for first-order adjoint */
487: for (nadj=0; nadj<ts->numcost; nadj++) {
488: KSPConvergedReason kspreason;
489: KSPSolveTranspose(ksp,VecsSensiTemp[nadj],VecsDeltaLam[nadj]);
490: KSPGetConvergedReason(ksp,&kspreason);
491: if (kspreason < 0) {
492: ts->reason = TSADJOINT_DIVERGED_LINEAR_SOLVE;
493: PetscInfo2(ts,"Step=%D, %Dth cost function, transposed linear solve fails, stopping 1st-order adjoint solve\n",ts->steps,nadj);
494: }
495: }
497: /* Second-order adjoint */
498: if (ts->vecs_sensi2) { /* U_{n+1} */
499: if (!th->endpoint) SETERRQ(PetscObjectComm((PetscObject)ts),PETSC_ERR_SUP,"Operation not implemented in TS_Theta");
500: /* Get w1 at t_{n+1} from TLM matrix */
501: MatDenseGetColumn(ts->mat_sensip,0,&xarr);
502: VecPlaceArray(ts->vec_sensip_col,xarr);
503: /* lambda_s^T F_UU w_1 */
504: TSComputeIHessianProductFunctionUU(ts,th->stage_time,ts->vec_sol,VecsDeltaLam,ts->vec_sensip_col,ts->vecs_fuu);
505: VecResetArray(ts->vec_sensip_col);
506: MatDenseRestoreColumn(ts->mat_sensip,&xarr);
507: /* lambda_s^T F_UP w_2 */
508: TSComputeIHessianProductFunctionUP(ts,th->stage_time,ts->vec_sol,VecsDeltaLam,ts->vec_dir,ts->vecs_fup);
509: for (nadj=0; nadj<ts->numcost; nadj++) { /* compute the residual */
510: VecCopy(ts->vecs_sensi2[nadj],VecsSensi2Temp[nadj]);
511: VecScale(VecsSensi2Temp[nadj],th->shift);
512: VecAXPY(VecsSensi2Temp[nadj],-1.,ts->vecs_fuu[nadj]);
513: if (ts->vecs_fup) {
514: VecAXPY(VecsSensi2Temp[nadj],-1.,ts->vecs_fup[nadj]);
515: }
516: }
517: /* Solve stage equation LHS X = RHS for second-order adjoint */
518: for (nadj=0; nadj<ts->numcost; nadj++) {
519: KSPConvergedReason kspreason;
520: KSPSolveTranspose(ksp,VecsSensi2Temp[nadj],VecsDeltaLam2[nadj]);
521: KSPGetConvergedReason(ksp,&kspreason);
522: if (kspreason < 0) {
523: ts->reason = TSADJOINT_DIVERGED_LINEAR_SOLVE;
524: PetscInfo2(ts,"Step=%D, %Dth cost function, transposed linear solve fails, stopping 2nd-order adjoint solve\n",ts->steps,nadj);
525: }
526: }
527: }
529: /* Update sensitivities, and evaluate integrals if there is any */
530: if (th->endpoint) { /* two-stage Theta methods with th->Theta!=1, th->Theta==1 leads to BEuler */
531: th->shift = 1./((th->Theta-1.)*adjoint_time_step);
532: th->stage_time = adjoint_ptime;
533: TSComputeSNESJacobian(ts,th->X0,J,Jpre);
534: KSPSetOperators(ksp,J,Jpre);
535: /* R_U at t_n */
536: if (quadts) {
537: TSComputeRHSJacobian(quadts,adjoint_ptime,th->X0,quadJ,NULL);
538: }
539: for (nadj=0; nadj<ts->numcost; nadj++) {
540: MatMultTranspose(J,VecsDeltaLam[nadj],ts->vecs_sensi[nadj]);
541: if (quadJ) {
542: MatDenseGetColumn(quadJ,nadj,&xarr);
543: VecPlaceArray(ts->vec_drdu_col,xarr);
544: VecAXPY(ts->vecs_sensi[nadj],-1.,ts->vec_drdu_col);
545: VecResetArray(ts->vec_drdu_col);
546: MatDenseRestoreColumn(quadJ,&xarr);
547: }
548: VecScale(ts->vecs_sensi[nadj],1./th->shift);
549: }
551: /* Second-order adjoint */
552: if (ts->vecs_sensi2) { /* U_n */
553: /* Get w1 at t_n from TLM matrix */
554: MatDenseGetColumn(th->MatFwdSensip0,0,&xarr);
555: VecPlaceArray(ts->vec_sensip_col,xarr);
556: /* lambda_s^T F_UU w_1 */
557: TSComputeIHessianProductFunctionUU(ts,adjoint_ptime,th->X0,VecsDeltaLam,ts->vec_sensip_col,ts->vecs_fuu);
558: VecResetArray(ts->vec_sensip_col);
559: MatDenseRestoreColumn(th->MatFwdSensip0,&xarr);
560: /* lambda_s^T F_UP w_2 */
561: TSComputeIHessianProductFunctionUP(ts,adjoint_ptime,th->X0,VecsDeltaLam,ts->vec_dir,ts->vecs_fup);
562: for (nadj=0; nadj<ts->numcost; nadj++) {
563: /* M^T Lambda_s + h(1-theta) F_U^T Lambda_s + h(1-theta) lambda_s^T F_UU w_1 + lambda_s^T F_UP w_2 */
564: MatMultTranspose(J,VecsDeltaLam2[nadj],ts->vecs_sensi2[nadj]);
565: VecAXPY(ts->vecs_sensi2[nadj],1.,ts->vecs_fuu[nadj]);
566: if (ts->vecs_fup) {
567: VecAXPY(ts->vecs_sensi2[nadj],1.,ts->vecs_fup[nadj]);
568: }
569: VecScale(ts->vecs_sensi2[nadj],1./th->shift);
570: }
571: }
573: th->stage_time = ts->ptime; /* recover the old value */
575: if (ts->vecs_sensip) { /* sensitivities wrt parameters */
576: /* U_{n+1} */
577: th->shift = 1.0/(adjoint_time_step*th->Theta);
578: VecAXPBYPCZ(th->Xdot,-th->shift,th->shift,0,th->X0,ts->vec_sol);
579: TSComputeIJacobianP(ts,th->stage_time,ts->vec_sol,th->Xdot,-1./(th->Theta*adjoint_time_step),ts->Jacp,PETSC_FALSE);
580: if (quadts) {
581: TSComputeRHSJacobianP(quadts,th->stage_time,ts->vec_sol,quadJp);
582: }
583: for (nadj=0; nadj<ts->numcost; nadj++) {
584: MatMultTranspose(ts->Jacp,VecsDeltaLam[nadj],VecsDeltaMu[nadj]);
585: VecAXPY(ts->vecs_sensip[nadj],-adjoint_time_step*th->Theta,VecsDeltaMu[nadj]);
586: if (quadJp) {
587: MatDenseGetColumn(quadJp,nadj,&xarr);
588: VecPlaceArray(ts->vec_drdp_col,xarr);
589: VecAXPY(ts->vecs_sensip[nadj],adjoint_time_step*th->Theta,ts->vec_drdp_col);
590: VecResetArray(ts->vec_drdp_col);
591: MatDenseRestoreColumn(quadJp,&xarr);
592: }
593: }
594: if (ts->vecs_sensi2p) { /* second-order */
595: /* Get w1 at t_{n+1} from TLM matrix */
596: MatDenseGetColumn(ts->mat_sensip,0,&xarr);
597: VecPlaceArray(ts->vec_sensip_col,xarr);
598: /* lambda_s^T F_PU w_1 */
599: TSComputeIHessianProductFunctionPU(ts,th->stage_time,ts->vec_sol,VecsDeltaLam,ts->vec_sensip_col,ts->vecs_fpu);
600: VecResetArray(ts->vec_sensip_col);
601: MatDenseRestoreColumn(ts->mat_sensip,&xarr);
603: /* lambda_s^T F_PP w_2 */
604: TSComputeIHessianProductFunctionPP(ts,th->stage_time,ts->vec_sol,VecsDeltaLam,ts->vec_dir,ts->vecs_fpp);
605: for (nadj=0; nadj<ts->numcost; nadj++) {
606: /* Mu2 <- Mu2 + h theta F_P^T Lambda_s + h theta (lambda_s^T F_PU w_1 + lambda_s^T F_PP w_2) */
607: MatMultTranspose(ts->Jacp,VecsDeltaLam2[nadj],VecsDeltaMu2[nadj]);
608: VecAXPY(ts->vecs_sensi2p[nadj],-adjoint_time_step*th->Theta,VecsDeltaMu2[nadj]);
609: if (ts->vecs_fpu) {
610: VecAXPY(ts->vecs_sensi2p[nadj],-adjoint_time_step*th->Theta,ts->vecs_fpu[nadj]);
611: }
612: if (ts->vecs_fpp) {
613: VecAXPY(ts->vecs_sensi2p[nadj],-adjoint_time_step*th->Theta,ts->vecs_fpp[nadj]);
614: }
615: }
616: }
618: /* U_s */
619: VecZeroEntries(th->Xdot);
620: TSComputeIJacobianP(ts,adjoint_ptime,th->X0,th->Xdot,1./((th->Theta-1.0)*adjoint_time_step),ts->Jacp,PETSC_FALSE);
621: if (quadts) {
622: TSComputeRHSJacobianP(quadts,adjoint_ptime,th->X0,quadJp);
623: }
624: for (nadj=0; nadj<ts->numcost; nadj++) {
625: MatMultTranspose(ts->Jacp,VecsDeltaLam[nadj],VecsDeltaMu[nadj]);
626: VecAXPY(ts->vecs_sensip[nadj],-adjoint_time_step*(1.0-th->Theta),VecsDeltaMu[nadj]);
627: if (quadJp) {
628: MatDenseGetColumn(quadJp,nadj,&xarr);
629: VecPlaceArray(ts->vec_drdp_col,xarr);
630: VecAXPY(ts->vecs_sensip[nadj],adjoint_time_step*(1.0-th->Theta),ts->vec_drdp_col);
631: VecResetArray(ts->vec_drdp_col);
632: MatDenseRestoreColumn(quadJp,&xarr);
633: }
634: if (ts->vecs_sensi2p) { /* second-order */
635: /* Get w1 at t_n from TLM matrix */
636: MatDenseGetColumn(th->MatFwdSensip0,0,&xarr);
637: VecPlaceArray(ts->vec_sensip_col,xarr);
638: /* lambda_s^T F_PU w_1 */
639: TSComputeIHessianProductFunctionPU(ts,adjoint_ptime,th->X0,VecsDeltaLam,ts->vec_sensip_col,ts->vecs_fpu);
640: VecResetArray(ts->vec_sensip_col);
641: MatDenseRestoreColumn(th->MatFwdSensip0,&xarr);
642: /* lambda_s^T F_PP w_2 */
643: TSComputeIHessianProductFunctionPP(ts,adjoint_ptime,th->X0,VecsDeltaLam,ts->vec_dir,ts->vecs_fpp);
644: for (nadj=0; nadj<ts->numcost; nadj++) {
645: /* Mu2 <- Mu2 + h(1-theta) F_P^T Lambda_s + h(1-theta) (lambda_s^T F_PU w_1 + lambda_s^T F_PP w_2) */
646: MatMultTranspose(ts->Jacp,VecsDeltaLam2[nadj],VecsDeltaMu2[nadj]);
647: VecAXPY(ts->vecs_sensi2p[nadj],-adjoint_time_step*(1.0-th->Theta),VecsDeltaMu2[nadj]);
648: if (ts->vecs_fpu) {
649: VecAXPY(ts->vecs_sensi2p[nadj],-adjoint_time_step*(1.0-th->Theta),ts->vecs_fpu[nadj]);
650: }
651: if (ts->vecs_fpp) {
652: VecAXPY(ts->vecs_sensi2p[nadj],-adjoint_time_step*(1.0-th->Theta),ts->vecs_fpp[nadj]);
653: }
654: }
655: }
656: }
657: }
658: } else { /* one-stage case */
659: th->shift = 0.0;
660: TSComputeSNESJacobian(ts,th->X,J,Jpre); /* get -f_y */
661: KSPSetOperators(ksp,J,Jpre);
662: if (quadts) {
663: TSComputeRHSJacobian(quadts,th->stage_time,th->X,quadJ,NULL);
664: }
665: for (nadj=0; nadj<ts->numcost; nadj++) {
666: MatMultTranspose(J,VecsDeltaLam[nadj],VecsSensiTemp[nadj]);
667: VecAXPY(ts->vecs_sensi[nadj],-adjoint_time_step,VecsSensiTemp[nadj]);
668: if (quadJ) {
669: MatDenseGetColumn(quadJ,nadj,&xarr);
670: VecPlaceArray(ts->vec_drdu_col,xarr);
671: VecAXPY(ts->vecs_sensi[nadj],adjoint_time_step,ts->vec_drdu_col);
672: VecResetArray(ts->vec_drdu_col);
673: MatDenseRestoreColumn(quadJ,&xarr);
674: }
675: }
676: if (ts->vecs_sensip) {
677: th->shift = 1.0/(adjoint_time_step*th->Theta);
678: VecAXPBYPCZ(th->Xdot,-th->shift,th->shift,0,th->X0,th->X);
679: TSComputeIJacobianP(ts,th->stage_time,th->X,th->Xdot,th->shift,ts->Jacp,PETSC_FALSE);
680: if (quadts) {
681: TSComputeRHSJacobianP(quadts,th->stage_time,th->X,quadJp);
682: }
683: for (nadj=0; nadj<ts->numcost; nadj++) {
684: MatMultTranspose(ts->Jacp,VecsDeltaLam[nadj],VecsDeltaMu[nadj]);
685: VecAXPY(ts->vecs_sensip[nadj],-adjoint_time_step,VecsDeltaMu[nadj]);
686: if (quadJp) {
687: MatDenseGetColumn(quadJp,nadj,&xarr);
688: VecPlaceArray(ts->vec_drdp_col,xarr);
689: VecAXPY(ts->vecs_sensip[nadj],adjoint_time_step,ts->vec_drdp_col);
690: VecResetArray(ts->vec_drdp_col);
691: MatDenseRestoreColumn(quadJp,&xarr);
692: }
693: }
694: }
695: }
697: th->status = TS_STEP_COMPLETE;
698: return(0);
699: }
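/*
  Linear interpolant based on the stored stage derivative:
    X(t) = u_current + (t - t_current) * udot
  For the midpoint form udot is th->Xdot; for the endpoint form th->Xdot holds (1/Theta)*udot
  (see SNESTSFormFunction_Theta()), hence the extra factor of Theta below.
*/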
701: static PetscErrorCode TSInterpolate_Theta(TS ts,PetscReal t,Vec X)
702: {
703: TS_Theta *th = (TS_Theta*)ts->data;
704: PetscReal dt = t - ts->ptime;
708: VecCopy(ts->vec_sol,th->X);
709: if (th->endpoint) dt *= th->Theta;
710: VecWAXPY(X,dt,th->Xdot,th->X);
711: return(0);
712: }
714: static PetscErrorCode TSEvaluateWLTE_Theta(TS ts,NormType wnormtype,PetscInt *order,PetscReal *wlte)
715: {
716: TS_Theta *th = (TS_Theta*)ts->data;
717: Vec X = ts->vec_sol; /* X = solution */
718: Vec Y = th->vec_lte_work; /* Y = X + LTE */
719: PetscReal wltea,wlter;
723: if (!th->vec_sol_prev) {*wlte = -1; return(0);}
724: /* Cannot compute LTE in first step or in restart after event */
725: if (ts->steprestart) {*wlte = -1; return(0);}
726: /* Compute LTE using backward differences with non-constant time step */
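  /*
     With a = 1 + h_prev/h the estimate formed below is
       Y = X + LTE,   LTE ~ X/a - X0/(a-1) + X_prev/(a*(a-1)),
     i.e. the scal[]/vecs[] coefficients implement a variable-step backward-difference estimate
     of the leading-order local truncation error.
  */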
727: {
728: PetscReal h = ts->time_step, h_prev = ts->ptime - ts->ptime_prev;
729: PetscReal a = 1 + h_prev/h;
730: PetscScalar scal[3]; Vec vecs[3];
731: scal[0] = +1/a; scal[1] = -1/(a-1); scal[2] = +1/(a*(a-1));
732: vecs[0] = X; vecs[1] = th->X0; vecs[2] = th->vec_sol_prev;
733: VecCopy(X,Y);
734: VecMAXPY(Y,3,scal,vecs);
735: TSErrorWeightedNorm(ts,X,Y,wnormtype,wlte,&wltea,&wlter);
736: }
737: if (order) *order = 2;
738: return(0);
739: }
741: static PetscErrorCode TSRollBack_Theta(TS ts)
742: {
743: TS_Theta *th = (TS_Theta*)ts->data;
744: TS quadts = ts->quadraturets;
748: VecCopy(th->X0,ts->vec_sol);
749: if (quadts && ts->costintegralfwd) {
750: VecCopy(th->VecCostIntegral0,quadts->vec_sol);
751: }
752: th->status = TS_STEP_INCOMPLETE;
753: if (ts->mat_sensip) {
754: MatCopy(th->MatFwdSensip0,ts->mat_sensip,SAME_NONZERO_PATTERN);
755: }
756: if (quadts && quadts->mat_sensip) {
757: MatCopy(th->MatIntegralSensip0,quadts->mat_sensip,SAME_NONZERO_PATTERN);
758: }
759: return(0);
760: }
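/*
  Forward (tangent linear) step, summarizing the code below: the RHS for each sensitivity column is
  assembled from the IJacobian at the old state/stage plus the IJacobianP forcing terms, the LHS is
  the IJacobian with the same shift used in TSStep_Theta(), and one linear solve per column of
  ts->mat_sensip propagates the sensitivities; quadrature (integral) sensitivities are accumulated
  before and after the solves.
*/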
762: static PetscErrorCode TSForwardStep_Theta(TS ts)
763: {
764: TS_Theta *th = (TS_Theta*)ts->data;
765: TS quadts = ts->quadraturets;
766: Mat MatDeltaFwdSensip = th->MatDeltaFwdSensip;
767: Vec VecDeltaFwdSensipCol = th->VecDeltaFwdSensipCol;
768: PetscInt ntlm;
769: KSP ksp;
770: Mat J,Jpre,quadJ = NULL,quadJp = NULL;
771: PetscScalar *barr,*xarr;
772: PetscReal previous_shift;
776: previous_shift = th->shift;
777: MatCopy(ts->mat_sensip,th->MatFwdSensip0,SAME_NONZERO_PATTERN);
779: if (quadts && quadts->mat_sensip) {
780: MatCopy(quadts->mat_sensip,th->MatIntegralSensip0,SAME_NONZERO_PATTERN);
781: }
782: SNESGetKSP(ts->snes,&ksp);
783: TSGetIJacobian(ts,&J,&Jpre,NULL,NULL);
784: if (quadts) {
785: TSGetRHSJacobian(quadts,&quadJ,NULL,NULL,NULL);
786: TSGetRHSJacobianP(quadts,&quadJp,NULL,NULL);
787: }
789: /* Build RHS */
790: if (th->endpoint) { /* 2-stage method */
791: th->shift = 1./((th->Theta-1.)*th->time_step0);
792: TSComputeIJacobian(ts,th->ptime0,th->X0,th->Xdot,th->shift,J,Jpre,PETSC_FALSE);
793: MatMatMult(J,ts->mat_sensip,MAT_REUSE_MATRIX,PETSC_DEFAULT,&MatDeltaFwdSensip);
794: MatScale(MatDeltaFwdSensip,(th->Theta-1.)/th->Theta);
796: /* Add the f_p forcing terms */
797: if (ts->Jacp) {
798: VecZeroEntries(th->Xdot);
799: TSComputeIJacobianP(ts,th->ptime0,th->X0,th->Xdot,th->shift,ts->Jacp,PETSC_FALSE);
800: MatAXPY(MatDeltaFwdSensip,(th->Theta-1.)/th->Theta,ts->Jacp,SUBSET_NONZERO_PATTERN);
801: th->shift = previous_shift;
802: VecAXPBYPCZ(th->Xdot,-th->shift,th->shift,0,th->X0,ts->vec_sol);
803: TSComputeIJacobianP(ts,th->stage_time,ts->vec_sol,th->Xdot,th->shift,ts->Jacp,PETSC_FALSE);
804: MatAXPY(MatDeltaFwdSensip,-1.,ts->Jacp,SUBSET_NONZERO_PATTERN);
805: }
806: } else { /* 1-stage method */
807: th->shift = 0.0;
808: TSComputeIJacobian(ts,th->stage_time,th->X,th->Xdot,th->shift,J,Jpre,PETSC_FALSE);
809: MatMatMult(J,ts->mat_sensip,MAT_REUSE_MATRIX,PETSC_DEFAULT,&MatDeltaFwdSensip);
810: MatScale(MatDeltaFwdSensip,-1.);
812: /* Add the f_p forcing terms */
813: if (ts->Jacp) {
814: th->shift = previous_shift;
815: VecAXPBYPCZ(th->Xdot,-th->shift,th->shift,0,th->X0,th->X);
816: TSComputeIJacobianP(ts,th->stage_time,th->X,th->Xdot,th->shift,ts->Jacp,PETSC_FALSE);
817: MatAXPY(MatDeltaFwdSensip,-1.,ts->Jacp,SUBSET_NONZERO_PATTERN);
818: }
819: }
821: /* Build LHS */
822: th->shift = previous_shift; /* recover the previous shift used in TSStep_Theta() */
823: if (th->endpoint) {
824: TSComputeIJacobian(ts,th->stage_time,ts->vec_sol,th->Xdot,th->shift,J,Jpre,PETSC_FALSE);
825: } else {
826: TSComputeIJacobian(ts,th->stage_time,th->X,th->Xdot,th->shift,J,Jpre,PETSC_FALSE);
827: }
828: KSPSetOperators(ksp,J,Jpre);
830: /*
831: Evaluate the first stage of integral gradients with the 2-stage method:
832: drdu|t_n*S(t_n) + drdp|t_n
833: This is done before the linear solve because the sensitivity variable S(t_n) will be propagated to S(t_{n+1})
834: */
835: if (th->endpoint) { /* 2-stage method only */
836: if (quadts && quadts->mat_sensip) {
837: TSComputeRHSJacobian(quadts,th->ptime0,th->X0,quadJ,NULL);
838: TSComputeRHSJacobianP(quadts,th->ptime0,th->X0,quadJp);
839: MatTransposeMatMult(ts->mat_sensip,quadJ,MAT_REUSE_MATRIX,PETSC_DEFAULT,&th->MatIntegralSensipTemp);
840: MatAXPY(th->MatIntegralSensipTemp,1,quadJp,SAME_NONZERO_PATTERN);
841: MatAXPY(quadts->mat_sensip,th->time_step0*(1.-th->Theta),th->MatIntegralSensipTemp,SAME_NONZERO_PATTERN);
842: }
843: }
845: /* Solve the tangent linear equation for forward sensitivities to parameters */
846: for (ntlm=0; ntlm<th->num_tlm; ntlm++) {
847: KSPConvergedReason kspreason;
848: MatDenseGetColumn(MatDeltaFwdSensip,ntlm,&barr);
849: VecPlaceArray(VecDeltaFwdSensipCol,barr);
850: if (th->endpoint) {
851: MatDenseGetColumn(ts->mat_sensip,ntlm,&xarr);
852: VecPlaceArray(ts->vec_sensip_col,xarr);
853: KSPSolve(ksp,VecDeltaFwdSensipCol,ts->vec_sensip_col);
854: VecResetArray(ts->vec_sensip_col);
855: MatDenseRestoreColumn(ts->mat_sensip,&xarr);
856: } else {
857: KSPSolve(ksp,VecDeltaFwdSensipCol,VecDeltaFwdSensipCol);
858: }
859: KSPGetConvergedReason(ksp,&kspreason);
860: if (kspreason < 0) {
861: ts->reason = TSFORWARD_DIVERGED_LINEAR_SOLVE;
862: PetscInfo2(ts,"Step=%D, %Dth tangent linear solve, linear solve fails, stopping tangent linear solve\n",ts->steps,ntlm);
863: }
864: VecResetArray(VecDeltaFwdSensipCol);
865: MatDenseRestoreColumn(MatDeltaFwdSensip,&barr);
866: }
868: /*
869: Evaluate the second stage of integral gradients with the 2-stage method:
870: drdu|t_{n+1}*S(t_{n+1}) + drdp|t_{n+1}
871: */
872: if (quadts && quadts->mat_sensip) {
873: if (!th->endpoint) {
874: MatAXPY(ts->mat_sensip,1,MatDeltaFwdSensip,SAME_NONZERO_PATTERN); /* stage sensitivity */
875: TSComputeRHSJacobian(quadts,th->stage_time,th->X,quadJ,NULL);
876: TSComputeRHSJacobianP(quadts,th->stage_time,th->X,quadJp);
877: MatTransposeMatMult(ts->mat_sensip,quadJ,MAT_REUSE_MATRIX,PETSC_DEFAULT,&th->MatIntegralSensipTemp);
878: MatAXPY(th->MatIntegralSensipTemp,1,quadJp,SAME_NONZERO_PATTERN);
879: MatAXPY(quadts->mat_sensip,th->time_step0,th->MatIntegralSensipTemp,SAME_NONZERO_PATTERN);
880: MatAXPY(ts->mat_sensip,(1.-th->Theta)/th->Theta,MatDeltaFwdSensip,SAME_NONZERO_PATTERN);
881: } else {
882: TSComputeRHSJacobian(quadts,th->stage_time,ts->vec_sol,quadJ,NULL);
883: TSComputeRHSJacobianP(quadts,th->stage_time,ts->vec_sol,quadJp);
884: MatTransposeMatMult(ts->mat_sensip,quadJ,MAT_REUSE_MATRIX,PETSC_DEFAULT,&th->MatIntegralSensipTemp);
885: MatAXPY(th->MatIntegralSensipTemp,1,quadJp,SAME_NONZERO_PATTERN);
886: MatAXPY(quadts->mat_sensip,th->time_step0*th->Theta,th->MatIntegralSensipTemp,SAME_NONZERO_PATTERN);
887: }
888: } else {
889: if (!th->endpoint) {
890: MatAXPY(ts->mat_sensip,1./th->Theta,MatDeltaFwdSensip,SAME_NONZERO_PATTERN);
891: }
892: }
893: return(0);
894: }
896: static PetscErrorCode TSForwardGetStages_Theta(TS ts,PetscInt *ns,Mat *stagesensip[])
897: {
898: TS_Theta *th = (TS_Theta*)ts->data;
901: if (ns) {
902: if (!th->endpoint && th->Theta != 1.0) *ns = 1; /* midpoint form */
903: else *ns = 2; /* endpoint form */
904: }
905: if (stagesensip) {
906: if (!th->endpoint && th->Theta != 1.0) {
907: th->MatFwdStages[0] = th->MatDeltaFwdSensip;
908: } else {
909: th->MatFwdStages[0] = th->MatFwdSensip0;
910: th->MatFwdStages[1] = ts->mat_sensip; /* stiffly accurate */
911: }
912: *stagesensip = th->MatFwdStages;
913: }
914: return(0);
915: }
917: /*------------------------------------------------------------*/
918: static PetscErrorCode TSReset_Theta(TS ts)
919: {
920: TS_Theta *th = (TS_Theta*)ts->data;
924: VecDestroy(&th->X);
925: VecDestroy(&th->Xdot);
926: VecDestroy(&th->X0);
927: VecDestroy(&th->affine);
929: VecDestroy(&th->vec_sol_prev);
930: VecDestroy(&th->vec_lte_work);
932: VecDestroy(&th->VecCostIntegral0);
933: return(0);
934: }
936: static PetscErrorCode TSAdjointReset_Theta(TS ts)
937: {
938: TS_Theta *th = (TS_Theta*)ts->data;
942: VecDestroyVecs(ts->numcost,&th->VecsDeltaLam);
943: VecDestroyVecs(ts->numcost,&th->VecsDeltaMu);
944: VecDestroyVecs(ts->numcost,&th->VecsDeltaLam2);
945: VecDestroyVecs(ts->numcost,&th->VecsDeltaMu2);
946: VecDestroyVecs(ts->numcost,&th->VecsSensiTemp);
947: VecDestroyVecs(ts->numcost,&th->VecsSensi2Temp);
948: return(0);
949: }
951: static PetscErrorCode TSDestroy_Theta(TS ts)
952: {
956: TSReset_Theta(ts);
957: if (ts->dm) {
958: DMCoarsenHookRemove(ts->dm,DMCoarsenHook_TSTheta,DMRestrictHook_TSTheta,ts);
959: DMSubDomainHookRemove(ts->dm,DMSubDomainHook_TSTheta,DMSubDomainRestrictHook_TSTheta,ts);
960: }
961: PetscFree(ts->data);
962: PetscObjectComposeFunction((PetscObject)ts,"TSThetaGetTheta_C",NULL);
963: PetscObjectComposeFunction((PetscObject)ts,"TSThetaSetTheta_C",NULL);
964: PetscObjectComposeFunction((PetscObject)ts,"TSThetaGetEndpoint_C",NULL);
965: PetscObjectComposeFunction((PetscObject)ts,"TSThetaSetEndpoint_C",NULL);
966: return(0);
967: }
969: /*
970: This defines the nonlinear equation that is to be solved with SNES
971: G(U) = F[t0+Theta*dt, U, (U-U0)*shift] = 0
973: Note that U here is the stage argument. This means that U = U_{n+1} only if endpoint = true,
974: otherwise U = theta U_{n+1} + (1 - theta) U0, which for the case of implicit midpoint is
975: U = (U_{n+1} + U0)/2
976: */
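/*
  Here Xdot is formed as shift*(U - U0) with shift = 1/(Theta*dt). For the endpoint form this is
  (1/Theta) times the finite-difference time derivative; together with the affine term built in
  TSStep_Theta() (and assuming a linear, time-independent mass matrix) this reproduces the endpoint
  Theta formula.
*/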
977: static PetscErrorCode SNESTSFormFunction_Theta(SNES snes,Vec x,Vec y,TS ts)
978: {
979: TS_Theta *th = (TS_Theta*)ts->data;
981: Vec X0,Xdot;
982: DM dm,dmsave;
983: PetscReal shift = th->shift;
986: SNESGetDM(snes,&dm);
987: /* When using the endpoint variant, this is actually 1/Theta * Xdot */
988: TSThetaGetX0AndXdot(ts,dm,&X0,&Xdot);
989: if (x != X0) {
990: VecAXPBYPCZ(Xdot,-shift,shift,0,X0,x);
991: } else {
992: VecZeroEntries(Xdot);
993: }
994: /* DM monkey-business allows user code to call TSGetDM() inside of functions evaluated on levels of FAS */
995: dmsave = ts->dm;
996: ts->dm = dm;
997: TSComputeIFunction(ts,th->stage_time,x,Xdot,y,PETSC_FALSE);
998: ts->dm = dmsave;
999: TSThetaRestoreX0AndXdot(ts,dm,&X0,&Xdot);
1000: return(0);
1001: }
1003: static PetscErrorCode SNESTSFormJacobian_Theta(SNES snes,Vec x,Mat A,Mat B,TS ts)
1004: {
1005: TS_Theta *th = (TS_Theta*)ts->data;
1007: Vec Xdot;
1008: DM dm,dmsave;
1009: PetscReal shift = th->shift;
1012: SNESGetDM(snes,&dm);
1013: /* Xdot has already been computed in SNESTSFormFunction_Theta (SNES guarantees this) */
1014: TSThetaGetX0AndXdot(ts,dm,NULL,&Xdot);
1016: dmsave = ts->dm;
1017: ts->dm = dm;
1018: TSComputeIJacobian(ts,th->stage_time,x,Xdot,shift,A,B,PETSC_FALSE);
1019: ts->dm = dmsave;
1020: TSThetaRestoreX0AndXdot(ts,dm,NULL,&Xdot);
1021: return(0);
1022: }
1024: static PetscErrorCode TSForwardSetUp_Theta(TS ts)
1025: {
1026: TS_Theta *th = (TS_Theta*)ts->data;
1027: TS quadts = ts->quadraturets;
1031: /* combine sensitivities to parameters and sensitivities to initial values into one array */
1032: th->num_tlm = ts->num_parameters;
1033: MatDuplicate(ts->mat_sensip,MAT_DO_NOT_COPY_VALUES,&th->MatDeltaFwdSensip);
1034: if (quadts && quadts->mat_sensip) {
1035: MatDuplicate(quadts->mat_sensip,MAT_DO_NOT_COPY_VALUES,&th->MatIntegralSensipTemp);
1036: MatDuplicate(quadts->mat_sensip,MAT_DO_NOT_COPY_VALUES,&th->MatIntegralSensip0);
1037: }
1038: /* backup sensitivity results for roll-backs */
1039: MatDuplicate(ts->mat_sensip,MAT_DO_NOT_COPY_VALUES,&th->MatFwdSensip0);
1041: VecDuplicate(ts->vec_sol,&th->VecDeltaFwdSensipCol);
1042: return(0);
1043: }
1045: static PetscErrorCode TSForwardReset_Theta(TS ts)
1046: {
1047: TS_Theta *th = (TS_Theta*)ts->data;
1048: TS quadts = ts->quadraturets;
1052: if (quadts && quadts->mat_sensip) {
1053: MatDestroy(&th->MatIntegralSensipTemp);
1054: MatDestroy(&th->MatIntegralSensip0);
1055: }
1056: VecDestroy(&th->VecDeltaFwdSensipCol);
1057: MatDestroy(&th->MatDeltaFwdSensip);
1058: MatDestroy(&th->MatFwdSensip0);
1059: return(0);
1060: }
1062: static PetscErrorCode TSSetUp_Theta(TS ts)
1063: {
1064: TS_Theta *th = (TS_Theta*)ts->data;
1065: TS quadts = ts->quadraturets;
1066: PetscBool match;
1070: if (!th->VecCostIntegral0 && quadts && ts->costintegralfwd) { /* back up cost integral */
1071: VecDuplicate(quadts->vec_sol,&th->VecCostIntegral0);
1072: }
1073: if (!th->X) {
1074: VecDuplicate(ts->vec_sol,&th->X);
1075: }
1076: if (!th->Xdot) {
1077: VecDuplicate(ts->vec_sol,&th->Xdot);
1078: }
1079: if (!th->X0) {
1080: VecDuplicate(ts->vec_sol,&th->X0);
1081: }
1082: if (th->endpoint) {
1083: VecDuplicate(ts->vec_sol,&th->affine);
1084: }
1086: th->order = (th->Theta == 0.5) ? 2 : 1;
1087: th->shift = 1/(th->Theta*ts->time_step);
1089: TSGetDM(ts,&ts->dm);
1090: DMCoarsenHookAdd(ts->dm,DMCoarsenHook_TSTheta,DMRestrictHook_TSTheta,ts);
1091: DMSubDomainHookAdd(ts->dm,DMSubDomainHook_TSTheta,DMSubDomainRestrictHook_TSTheta,ts);
1093: TSGetAdapt(ts,&ts->adapt);
1094: TSAdaptCandidatesClear(ts->adapt);
1095: PetscObjectTypeCompare((PetscObject)ts->adapt,TSADAPTNONE,&match);
1096: if (!match) {
1097: VecDuplicate(ts->vec_sol,&th->vec_sol_prev);
1098: VecDuplicate(ts->vec_sol,&th->vec_lte_work);
1099: }
1100: TSGetSNES(ts,&ts->snes);
1102: ts->stifflyaccurate = (!th->endpoint && th->Theta != 1.0) ? PETSC_FALSE : PETSC_TRUE;
1103: return(0);
1104: }
1106: /*------------------------------------------------------------*/
1108: static PetscErrorCode TSAdjointSetUp_Theta(TS ts)
1109: {
1110: TS_Theta *th = (TS_Theta*)ts->data;
1114: VecDuplicateVecs(ts->vecs_sensi[0],ts->numcost,&th->VecsDeltaLam);
1115: VecDuplicateVecs(ts->vecs_sensi[0],ts->numcost,&th->VecsSensiTemp);
1116: if (ts->vecs_sensip) {
1117: VecDuplicateVecs(ts->vecs_sensip[0],ts->numcost,&th->VecsDeltaMu);
1118: }
1119: if (ts->vecs_sensi2) {
1120: VecDuplicateVecs(ts->vecs_sensi[0],ts->numcost,&th->VecsDeltaLam2);
1121: VecDuplicateVecs(ts->vecs_sensi2[0],ts->numcost,&th->VecsSensi2Temp);
1122: /* hack ts to make implicit TS solver work when users provide only explicit versions of callbacks (RHSFunction,RHSJacobian,RHSHessian etc.) */
1123: if (!ts->ihessianproduct_fuu) ts->vecs_fuu = ts->vecs_guu;
1124: if (!ts->ihessianproduct_fup) ts->vecs_fup = ts->vecs_gup;
1125: }
1126: if (ts->vecs_sensi2p) {
1127: VecDuplicateVecs(ts->vecs_sensi2p[0],ts->numcost,&th->VecsDeltaMu2);
1128: /* hack ts to make implicit TS solver work when users provide only explicit versions of callbacks (RHSFunction,RHSJacobian,RHSHessian etc.) */
1129: if (!ts->ihessianproduct_fpu) ts->vecs_fpu = ts->vecs_gpu;
1130: if (!ts->ihessianproduct_fpp) ts->vecs_fpp = ts->vecs_gpp;
1131: }
1132: return(0);
1133: }
1135: static PetscErrorCode TSSetFromOptions_Theta(PetscOptionItems *PetscOptionsObject,TS ts)
1136: {
1137: TS_Theta *th = (TS_Theta*)ts->data;
1141: PetscOptionsHead(PetscOptionsObject,"Theta ODE solver options");
1142: {
1143: PetscOptionsReal("-ts_theta_theta","Location of stage (0<Theta<=1)","TSThetaSetTheta",th->Theta,&th->Theta,NULL);
1144: PetscOptionsBool("-ts_theta_endpoint","Use the endpoint instead of midpoint form of the Theta method","TSThetaSetEndpoint",th->endpoint,&th->endpoint,NULL);
1145: PetscOptionsBool("-ts_theta_initial_guess_extrapolate","Extrapolate stage initial guess from previous solution (sometimes unstable)","TSThetaSetExtrapolate",th->extrapolate,&th->extrapolate,NULL);
1146: }
1147: PetscOptionsTail();
1148: return(0);
1149: }
1151: static PetscErrorCode TSView_Theta(TS ts,PetscViewer viewer)
1152: {
1153: TS_Theta *th = (TS_Theta*)ts->data;
1154: PetscBool iascii;
1158: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);
1159: if (iascii) {
1160: PetscViewerASCIIPrintf(viewer," Theta=%g\n",(double)th->Theta);
1161: PetscViewerASCIIPrintf(viewer," Extrapolation=%s\n",th->extrapolate ? "yes" : "no");
1162: }
1163: return(0);
1164: }
1166: static PetscErrorCode TSThetaGetTheta_Theta(TS ts,PetscReal *theta)
1167: {
1168: TS_Theta *th = (TS_Theta*)ts->data;
1171: *theta = th->Theta;
1172: return(0);
1173: }
1175: static PetscErrorCode TSThetaSetTheta_Theta(TS ts,PetscReal theta)
1176: {
1177: TS_Theta *th = (TS_Theta*)ts->data;
1180: if (theta <= 0 || 1 < theta) SETERRQ1(PetscObjectComm((PetscObject)ts),PETSC_ERR_ARG_OUTOFRANGE,"Theta %g not in range (0,1]",(double)theta);
1181: th->Theta = theta;
1182: th->order = (th->Theta == 0.5) ? 2 : 1;
1183: return(0);
1184: }
1186: static PetscErrorCode TSThetaGetEndpoint_Theta(TS ts,PetscBool *endpoint)
1187: {
1188: TS_Theta *th = (TS_Theta*)ts->data;
1191: *endpoint = th->endpoint;
1192: return(0);
1193: }
1195: static PetscErrorCode TSThetaSetEndpoint_Theta(TS ts,PetscBool flg)
1196: {
1197: TS_Theta *th = (TS_Theta*)ts->data;
1200: th->endpoint = flg;
1201: return(0);
1202: }
1204: #if defined(PETSC_HAVE_COMPLEX)
1205: static PetscErrorCode TSComputeLinearStability_Theta(TS ts,PetscReal xr,PetscReal xi,PetscReal *yr,PetscReal *yi)
1206: {
1207: PetscComplex z = xr + xi*PETSC_i,f;
1208: TS_Theta *th = (TS_Theta*)ts->data;
1209: const PetscReal one = 1.0;
1212: f = (one + (one - th->Theta)*z)/(one - th->Theta*z);
1213: *yr = PetscRealPartComplex(f);
1214: *yi = PetscImaginaryPartComplex(f);
1215: return(0);
1216: }
1217: #endif
1219: static PetscErrorCode TSGetStages_Theta(TS ts,PetscInt *ns,Vec *Y[])
1220: {
1221: TS_Theta *th = (TS_Theta*)ts->data;
1224: if (ns) {
1225: if (!th->endpoint && th->Theta != 1.0) *ns = 1; /* midpoint form */
1226: else *ns = 2; /* endpoint form */
1227: }
1228: if (Y) {
1229: if (!th->endpoint && th->Theta != 1.0) {
1230: th->Stages[0] = th->X;
1231: } else {
1232: th->Stages[0] = th->X0;
1233: th->Stages[1] = ts->vec_sol; /* stiffly accurate */
1234: }
1235: *Y = th->Stages;
1236: }
1237: return(0);
1238: }
1240: /* ------------------------------------------------------------ */
1241: /*MC
1242: TSTHETA - DAE solver using the implicit Theta method
1244: Level: beginner
1246: Options Database:
1247: + -ts_theta_theta <Theta> - Location of stage (0<Theta<=1)
1248: . -ts_theta_endpoint <flag> - Use the endpoint (like Crank-Nicolson) instead of midpoint form of the Theta method
1249: - -ts_theta_initial_guess_extrapolate <flg> - Extrapolate stage initial guess from previous solution (sometimes unstable)
1251: Notes:
1252: $ -ts_type theta -ts_theta_theta 1.0 corresponds to backward Euler (TSBEULER)
1253: $ -ts_type theta -ts_theta_theta 0.5 corresponds to the implicit midpoint rule
1254: $ -ts_type theta -ts_theta_theta 0.5 -ts_theta_endpoint corresponds to Crank-Nicolson (TSCN)
1256: The endpoint variant of the Theta method and backward Euler can be applied to DAEs. The midpoint variant is not suitable for DAEs because it is not stiffly accurate.
1258: The midpoint variant is cast as a 1-stage implicit Runge-Kutta method.
1260: .vb
1261: Theta | Theta
1262: -------------
1263: | 1
1264: .ve
1266: For the default Theta=0.5, this is also known as the implicit midpoint rule.
1268: When the endpoint variant is chosen, the method becomes a 2-stage method with first stage explicit:
1270: .vb
1271: 0 | 0 0
1272: 1 | 1-Theta Theta
1273: -------------------
1274: | 1-Theta Theta
1275: .ve
1277: For the default Theta=0.5, this is the trapezoid rule (also known as Crank-Nicolson, see TSCN).
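   A minimal programmatic setup equivalent to the command-line examples above (sketch only):

.vb
   TSCreate(PETSC_COMM_WORLD,&ts);
   TSSetType(ts,TSTHETA);
   TSThetaSetTheta(ts,0.5);            /* stage abscissa */
   TSThetaSetEndpoint(ts,PETSC_TRUE);  /* endpoint (Crank-Nicolson) form, i.e. TSCN */
.ve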
1279: To apply a diagonally implicit RK method to a DAE, the stage formula
1281: $ Y_i = X + h sum_j a_ij Y'_j
1283: is interpreted as a formula for Y'_i in terms of Y_i and known values (Y'_j, j<i)
1285: .seealso: TSCreate(), TS, TSSetType(), TSCN, TSBEULER, TSThetaSetTheta(), TSThetaSetEndpoint()
1287: M*/
1288: PETSC_EXTERN PetscErrorCode TSCreate_Theta(TS ts)
1289: {
1290: TS_Theta *th;
1294: ts->ops->reset = TSReset_Theta;
1295: ts->ops->adjointreset = TSAdjointReset_Theta;
1296: ts->ops->destroy = TSDestroy_Theta;
1297: ts->ops->view = TSView_Theta;
1298: ts->ops->setup = TSSetUp_Theta;
1299: ts->ops->adjointsetup = TSAdjointSetUp_Theta;
1300: ts->ops->adjointreset = TSAdjointReset_Theta;
1301: ts->ops->step = TSStep_Theta;
1302: ts->ops->interpolate = TSInterpolate_Theta;
1303: ts->ops->evaluatewlte = TSEvaluateWLTE_Theta;
1304: ts->ops->rollback = TSRollBack_Theta;
1305: ts->ops->setfromoptions = TSSetFromOptions_Theta;
1306: ts->ops->snesfunction = SNESTSFormFunction_Theta;
1307: ts->ops->snesjacobian = SNESTSFormJacobian_Theta;
1308: #if defined(PETSC_HAVE_COMPLEX)
1309: ts->ops->linearstability = TSComputeLinearStability_Theta;
1310: #endif
1311: ts->ops->getstages = TSGetStages_Theta;
1312: ts->ops->adjointstep = TSAdjointStep_Theta;
1313: ts->ops->adjointintegral = TSAdjointCostIntegral_Theta;
1314: ts->ops->forwardintegral = TSForwardCostIntegral_Theta;
1315: ts->default_adapt_type = TSADAPTNONE;
1317: ts->ops->forwardsetup = TSForwardSetUp_Theta;
1318: ts->ops->forwardreset = TSForwardReset_Theta;
1319: ts->ops->forwardstep = TSForwardStep_Theta;
1320: ts->ops->forwardgetstages = TSForwardGetStages_Theta;
1322: ts->usessnes = PETSC_TRUE;
1324: PetscNewLog(ts,&th);
1325: ts->data = (void*)th;
1327: th->VecsDeltaLam = NULL;
1328: th->VecsDeltaMu = NULL;
1329: th->VecsSensiTemp = NULL;
1330: th->VecsSensi2Temp = NULL;
1332: th->extrapolate = PETSC_FALSE;
1333: th->Theta = 0.5;
1334: th->order = 2;
1335: PetscObjectComposeFunction((PetscObject)ts,"TSThetaGetTheta_C",TSThetaGetTheta_Theta);
1336: PetscObjectComposeFunction((PetscObject)ts,"TSThetaSetTheta_C",TSThetaSetTheta_Theta);
1337: PetscObjectComposeFunction((PetscObject)ts,"TSThetaGetEndpoint_C",TSThetaGetEndpoint_Theta);
1338: PetscObjectComposeFunction((PetscObject)ts,"TSThetaSetEndpoint_C",TSThetaSetEndpoint_Theta);
1339: return(0);
1340: }
1342: /*@
1343: TSThetaGetTheta - Get the abscissa of the stage in (0,1].
1345: Not Collective
1347: Input Parameter:
1348: . ts - timestepping context
1350: Output Parameter:
1351: . theta - stage abscissa
1353: Note:
1354: Use of this function is normally only required to hack TSTHETA to use a modified integration scheme.
1356: Level: advanced
1358: .seealso: TSThetaSetTheta()
1359: @*/
1360: PetscErrorCode TSThetaGetTheta(TS ts,PetscReal *theta)
1361: {
1367: PetscUseMethod(ts,"TSThetaGetTheta_C",(TS,PetscReal*),(ts,theta));
1368: return(0);
1369: }
1371: /*@
1372: TSThetaSetTheta - Set the abscissa of the stage in (0,1].
1374: Not Collective
1376: Input Parameters:
1377: + ts - timestepping context
1378: - theta - stage abscissa
1380: Options Database:
1381: . -ts_theta_theta <theta>
1383: Level: intermediate
1385: .seealso: TSThetaGetTheta()
1386: @*/
1387: PetscErrorCode TSThetaSetTheta(TS ts,PetscReal theta)
1388: {
1393: PetscTryMethod(ts,"TSThetaSetTheta_C",(TS,PetscReal),(ts,theta));
1394: return(0);
1395: }
1397: /*@
1398: TSThetaGetEndpoint - Gets whether to use the endpoint variant of the method (e.g. trapezoid/Crank-Nicolson instead of midpoint rule).
1400: Not Collective
1402: Input Parameter:
1403: . ts - timestepping context
1405: Output Parameter:
1406: . endpoint - PETSC_TRUE when using the endpoint variant
1408: Level: advanced
1410: .seealso: TSThetaSetEndpoint(), TSTHETA, TSCN
1411: @*/
1412: PetscErrorCode TSThetaGetEndpoint(TS ts,PetscBool *endpoint)
1413: {
1419: PetscUseMethod(ts,"TSThetaGetEndpoint_C",(TS,PetscBool*),(ts,endpoint));
1420: return(0);
1421: }
1423: /*@
1424: TSThetaSetEndpoint - Sets whether to use the endpoint variant of the method (e.g. trapezoid/Crank-Nicolson instead of midpoint rule).
1426: Not Collective
1428: Input Parameters:
1429: + ts - timestepping context
1430: - flg - PETSC_TRUE to use the endpoint variant
1432: Options Database:
1433: . -ts_theta_endpoint <flg>
1435: Level: intermediate
1437: .seealso: TSTHETA, TSCN
1438: @*/
1439: PetscErrorCode TSThetaSetEndpoint(TS ts,PetscBool flg)
1440: {
1445: PetscTryMethod(ts,"TSThetaSetEndpoint_C",(TS,PetscBool),(ts,flg));
1446: return(0);
1447: }
1449: /*
1450: * TSBEULER and TSCN are straightforward specializations of TSTHETA.
1451: * The creation functions for these specializations are below.
1452: */
1454: static PetscErrorCode TSSetUp_BEuler(TS ts)
1455: {
1456: TS_Theta *th = (TS_Theta*)ts->data;
1460: if (th->Theta != 1.0) SETERRQ(PetscObjectComm((PetscObject)ts),PETSC_ERR_OPT_OVERWRITE,"Cannot change the default value (1) of theta when using backward Euler\n");
1461: if (th->endpoint) SETERRQ(PetscObjectComm((PetscObject)ts),PETSC_ERR_OPT_OVERWRITE,"Cannot change to the endpoint form of the Theta methods when using backward Euler\n");
1462: TSSetUp_Theta(ts);
1463: return(0);
1464: }
1466: static PetscErrorCode TSView_BEuler(TS ts,PetscViewer viewer)
1467: {
1469: return(0);
1470: }
1472: /*MC
1473: TSBEULER - ODE solver using the implicit backward Euler method
1475: Level: beginner
1477: Notes:
1478: TSBEULER is equivalent to TSTHETA with Theta=1.0
1480: $ -ts_type theta -ts_theta_theta 1.0
1482: .seealso: TSCreate(), TS, TSSetType(), TSEULER, TSCN, TSTHETA
1484: M*/
1485: PETSC_EXTERN PetscErrorCode TSCreate_BEuler(TS ts)
1486: {
1490: TSCreate_Theta(ts);
1491: TSThetaSetTheta(ts,1.0);
1492: TSThetaSetEndpoint(ts,PETSC_FALSE);
1493: ts->ops->setup = TSSetUp_BEuler;
1494: ts->ops->view = TSView_BEuler;
1495: return(0);
1496: }
1498: static PetscErrorCode TSSetUp_CN(TS ts)
1499: {
1500: TS_Theta *th = (TS_Theta*)ts->data;
1504: if (th->Theta != 0.5) SETERRQ(PetscObjectComm((PetscObject)ts),PETSC_ERR_OPT_OVERWRITE,"Cannot change the default value (0.5) of theta when using Crank-Nicolson\n");
1505: if (!th->endpoint) SETERRQ(PetscObjectComm((PetscObject)ts),PETSC_ERR_OPT_OVERWRITE,"Cannot change to the midpoint form of the Theta methods when using Crank-Nicolson\n");
1506: TSSetUp_Theta(ts);
1507: return(0);
1508: }
1510: static PetscErrorCode TSView_CN(TS ts,PetscViewer viewer)
1511: {
1513: return(0);
1514: }
1516: /*MC
1517: TSCN - ODE solver using the implicit Crank-Nicolson method.
1519: Level: beginner
1521: Notes:
1522: TSCN is equivalent to TSTHETA with Theta=0.5 and the "endpoint" option set. I.e.
1524: $ -ts_type theta -ts_theta_theta 0.5 -ts_theta_endpoint
1526: .seealso: TSCreate(), TS, TSSetType(), TSBEULER, TSTHETA
1528: M*/
1529: PETSC_EXTERN PetscErrorCode TSCreate_CN(TS ts)
1530: {
1534: TSCreate_Theta(ts);
1535: TSThetaSetTheta(ts,0.5);
1536: TSThetaSetEndpoint(ts,PETSC_TRUE);
1537: ts->ops->setup = TSSetUp_CN;
1538: ts->ops->view = TSView_CN;
1539: return(0);
1540: }