Actual source code: ntr.c

  1: #include "src/matrix/lmvmmat.h"
  2: #include "ntr.h"

  4: #include "petscksp.h"
  5: #include "petscpc.h"
  6: #include "private/kspimpl.h"
  7: #include "private/pcimpl.h"

  9: #define NTR_KSP_NASH    0
 10: #define NTR_KSP_STCG    1
 11: #define NTR_KSP_GLTR    2
 12: #define NTR_KSP_TYPES   3

 14: #define NTR_PC_NONE     0
 15: #define NTR_PC_AHESS    1
 16: #define NTR_PC_BFGS     2
 17: #define NTR_PC_PETSC    3
 18: #define NTR_PC_TYPES    4

 20: #define BFGS_SCALE_AHESS   0
 21: #define BFGS_SCALE_BFGS    1
 22: #define BFGS_SCALE_TYPES   2

 24: #define NTR_INIT_CONSTANT       0
 25: #define NTR_INIT_DIRECTION      1
 26: #define NTR_INIT_INTERPOLATION  2
 27: #define NTR_INIT_TYPES          3

 29: #define NTR_UPDATE_REDUCTION      0
 30: #define NTR_UPDATE_INTERPOLATION  1
 31: #define NTR_UPDATE_TYPES          2

 33: static const char *NTR_KSP[64] = {
 34:   "nash", "stcg", "gltr"
 35: };

 37: static const char *NTR_PC[64] = {
 38:   "none", "ahess", "bfgs", "petsc"
 39: };

 41: static const char *BFGS_SCALE[64] = {
 42:   "ahess", "bfgs"
 43: };

 45: static const char *NTR_INIT[64] = {
 46:   "constant", "direction", "interpolation"
 47: };

 49: static const char *NTR_UPDATE[64] = {
 50:   "reduction", "interpolation"
 51: };
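/*  The tables above give the accepted values for the -tao_ntr_ksp_type,
    -tao_ntr_pc_type, -tao_ntr_bfgs_scale_type, -tao_ntr_init_type, and
    -tao_ntr_update_type options parsed in TaoSetFromOptions_NTR below.
    For example (hypothetical executable name):

        ./myprog -tao_ntr_ksp_type gltr -tao_ntr_pc_type bfgs -tao_ntr_init_type interpolation
*/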

 53: /*  Routine for BFGS preconditioner */
 54: static PetscErrorCode MatLMVMSolveShell(PC pc, Vec xin, Vec xout);

 56: /*
 57:    TaoSolve_NTR - Implements Newton's Method with a trust region approach 
 58:    for solving unconstrained minimization problems.  
 59:   
 60:    The basic algorithm is taken from MINPACK-2 (dstrn).
 61:   
 62:    TaoSolve_NTR computes a local minimizer of a twice differentiable function
 63:    f by applying a trust region variant of Newton's method.  At each stage 
 64:    of the algorithm, we use the preconditioned conjugate gradient method to
 65:    determine an approximate minimizer of the quadratic model
 66:
 67:         q(s) = <g, s> + 0.5 <s, Hs>
 68:   
 69:    subject to the trust region constraint
 70:   
 71:         || s ||_M <= radius,
 72:   
 73:    where radius is the trust region radius and M is a symmetric positive
 74:    definite matrix (the preconditioner).  Here g is the gradient and H 
 75:    is the Hessian matrix.
 76:   
 77:    Note:  TaoSolve_NTR MUST use the iterative solver KSPNASH, KSPSTCG,
 78:           or KSPGLTR.  Thus, we set KSPNASH, KSPSTCG, or KSPGLTR in this 
 79:           routine regardless of what the user may have previously specified.
 80: */
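/*
   For reference, a schematic sketch (not the verbatim logic) of the
   reduction-based radius update implemented in the NTR_UPDATE_REDUCTION
   branch below: with the agreement ratio

        kappa = actred / prered,

   where actred = f(x) - f(x+s) is the actual reduction and prered is the
   reduction predicted by the quadratic model, the step and radius are
   updated roughly as

        if      (kappa < eta1)  radius = alpha1 * min(radius, ||s||);   reject step
        else if (kappa < eta2)  radius = alpha2 * min(radius, ||s||);   accept, shrink
        else if (kappa < eta3)  radius = alpha3 * radius;               accept, keep
        else if (kappa < eta4)  radius = max(alpha4 * ||s||, radius);   accept, expand
        else                    radius = max(alpha5 * ||s||, radius);   accept, expand further
*/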
 83: static PetscErrorCode TaoSolve_NTR(TaoSolver tao)
 84: {
 85:   TAO_NTR *tr = (TAO_NTR *)tao->data;

 87:   PC pc;

 89:   KSPConvergedReason ksp_reason;
 90:   TaoSolverTerminationReason reason;

 92:   MatStructure matflag;
 93:   
 94:   PetscReal fmin, ftrial, prered, actred, kappa, sigma, beta;
 95:   PetscReal tau, tau_1, tau_2, tau_max, tau_min, max_radius;
 96:   PetscReal f, gnorm;

 98:   PetscReal delta;
 99:   PetscReal norm_d;

102:   PetscInt iter = 0;
103:   PetscInt bfgsUpdates = 0;
104:   PetscInt needH;

106:   PetscInt i_max = 5;
107:   PetscInt j_max = 1;
108:   PetscInt i, j, N, n, its;


112:   if (tao->XL || tao->XU || tao->ops->computebounds) {
113:     PetscPrintf(((PetscObject)tao)->comm,"WARNING: Variable bounds have been set but will be ignored by the ntr algorithm\n"); 
114:   }

116:   tao->trust = tao->trust0;

118:   /* Modify the radius if it is too large or small */
119:   tao->trust = PetscMax(tao->trust, tr->min_radius);
120:   tao->trust = PetscMin(tao->trust, tr->max_radius);


123:   if (NTR_PC_BFGS == tr->pc_type && !tr->M) {
124:     VecGetLocalSize(tao->solution,&n); 
125:     VecGetSize(tao->solution,&N); 
126:     MatCreateLMVM(((PetscObject)tao)->comm,n,N,&tr->M); 
127:     MatLMVMAllocateVectors(tr->M,tao->solution); 
128:   }

130:   /* Check convergence criteria */
131:   TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient); 
132:   VecNorm(tao->gradient,NORM_2,&gnorm); 
133:   if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) {
134:     SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
135:   }
136:   needH = 1;

138:   TaoMonitor(tao, iter, f, gnorm, 0.0, 1.0, &reason); 
139:   if (reason != TAO_CONTINUE_ITERATING) {
140:     return(0);
141:   }

143:   /* Create vectors for the limited memory preconditioner */
144:   if ((NTR_PC_BFGS == tr->pc_type) && 
145:       (BFGS_SCALE_BFGS != tr->bfgs_scale_type)) {
146:     if (!tr->Diag) {
147:         VecDuplicate(tao->solution, &tr->Diag); 
148:     }
149:   }
150:  
151:   switch(tr->ksp_type) {
152:   case NTR_KSP_NASH:
153:     KSPSetType(tao->ksp, KSPNASH); 
154:     if (tao->ksp->ops->setfromoptions) {
155:       (*tao->ksp->ops->setfromoptions)(tao->ksp);
156:     }
157:     break;

159:   case NTR_KSP_STCG:
160:     KSPSetType(tao->ksp, KSPSTCG); 
161:     if (tao->ksp->ops->setfromoptions) {
162:       (*tao->ksp->ops->setfromoptions)(tao->ksp);
163:     }
164:     break;

166:   default:
167:     KSPSetType(tao->ksp, KSPGLTR); 
168:     if (tao->ksp->ops->setfromoptions) {
169:       (*tao->ksp->ops->setfromoptions)(tao->ksp);
170:     }
171:     break;
172:   }

174:   /*  Modify the preconditioner to use the bfgs approximation */
175:   KSPGetPC(tao->ksp, &pc); 
176:   switch(tr->pc_type) {
177:   case NTR_PC_NONE:
178:     PCSetType(pc, PCNONE); 
179:     if (pc->ops->setfromoptions) {
180:       (*pc->ops->setfromoptions)(pc);
181:     }
182:     break;
183:  
184:   case NTR_PC_AHESS:
185:     PCSetType(pc, PCJACOBI); 
186:     if (pc->ops->setfromoptions) {
187:       (*pc->ops->setfromoptions)(pc);
188:     }
189:     PCJacobiSetUseAbs(pc); 
190:     break;

192:   case NTR_PC_BFGS:
193:     PCSetType(pc, PCSHELL); 
194:     if (pc->ops->setfromoptions) {
195:       (*pc->ops->setfromoptions)(pc);
196:     }
197:     PCShellSetName(pc, "bfgs"); 
198:     PCShellSetContext(pc, tr->M); 
199:     PCShellSetApply(pc, MatLMVMSolveShell); 
200:     break;

202:   default:
203:     /*  Use the pc method set by pc_type */
204:     break;
205:   }
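  /*  With NTR_PC_BFGS, each preconditioner application is forwarded to the
      limited-memory BFGS matrix: MatLMVMSolveShell (defined at the end of
      this file) retrieves tr->M from the shell context and calls
      MatLMVMSolve, so the KSP is preconditioned with the inverse of the
      current quasi-Newton approximation. */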

207:   /*  Initialize trust-region radius */
208:   switch(tr->init_type) {
209:   case NTR_INIT_CONSTANT:
210:     /*  Use the initial radius specified */
211:     break;

213:   case NTR_INIT_INTERPOLATION:
214:     /*  Use interpolation to determine the initial radius */
215:     max_radius = 0.0;

217:     for (j = 0; j < j_max; ++j) {
218:       fmin = f;
219:       sigma = 0.0;

221:       if (needH) {
222:           TaoComputeHessian(tao, tao->solution, &tao->hessian, &tao->hessian_pre, &matflag); 
223:         needH = 0;
224:       }

226:       for (i = 0; i < i_max; ++i) {

228:         VecCopy(tao->solution, tr->W); 
229:         VecAXPY(tr->W, -tao->trust/gnorm, tao->gradient); 
230:         TaoComputeObjective(tao, tr->W, &ftrial); 

232:         if (PetscIsInfOrNanReal(ftrial)) {
233:           tau = tr->gamma1_i;
234:         }
235:         else {
236:           if (ftrial < fmin) {
237:             fmin = ftrial;
238:             sigma = -tao->trust / gnorm;
239:           }

241:           MatMult(tao->hessian, tao->gradient, tao->stepdirection); 
242:           VecDot(tao->gradient, tao->stepdirection, &prered); 

244:           prered = tao->trust * (gnorm - 0.5 * tao->trust * prered / (gnorm * gnorm));
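          /*  The VecDot above leaves g^T H g in prered; for the trial step
              s = -(trust/||g||) g the model value is
              q(s) = <g,s> + 0.5 <s,Hs> = -trust*||g|| + 0.5*trust^2*(g^T H g)/||g||^2,
              so the line above stores the predicted reduction prered = -q(s). */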
245:           actred = f - ftrial;
246:           if ((PetscAbsScalar(actred) <= tr->epsilon) && 
247:               (PetscAbsScalar(prered) <= tr->epsilon)) {
248:             kappa = 1.0;
249:           }
250:           else {
251:             kappa = actred / prered;
252:           }

254:           tau_1 = tr->theta_i * gnorm * tao->trust / (tr->theta_i * gnorm * tao->trust + (1.0 - tr->theta_i) * prered - actred);
255:           tau_2 = tr->theta_i * gnorm * tao->trust / (tr->theta_i * gnorm * tao->trust - (1.0 + tr->theta_i) * prered + actred);
256:           tau_min = PetscMin(tau_1, tau_2);
257:           tau_max = PetscMax(tau_1, tau_2);

259:           if (PetscAbsScalar(kappa - 1.0) <= tr->mu1_i) {
260:             /*  Great agreement */
261:             max_radius = PetscMax(max_radius, tao->trust);

263:             if (tau_max < 1.0) {
264:               tau = tr->gamma3_i;
265:             }
266:             else if (tau_max > tr->gamma4_i) {
267:               tau = tr->gamma4_i;
268:             }
269:             else {
270:               tau = tau_max;
271:             }
272:           }
273:           else if (PetscAbsScalar(kappa - 1.0) <= tr->mu2_i) {
274:             /*  Good agreement */
275:             max_radius = PetscMax(max_radius, tao->trust);

277:             if (tau_max < tr->gamma2_i) {
278:               tau = tr->gamma2_i;
279:             }
280:             else if (tau_max > tr->gamma3_i) {
281:               tau = tr->gamma3_i;
282:             }
283:             else {
284:               tau = tau_max;
285:             }
286:           }
287:           else {
288:             /*  Not good agreement */
289:             if (tau_min > 1.0) {
290:               tau = tr->gamma2_i;
291:             }
292:             else if (tau_max < tr->gamma1_i) {
293:               tau = tr->gamma1_i;
294:             }
295:             else if ((tau_min < tr->gamma1_i) && (tau_max >= 1.0)) {
296:               tau = tr->gamma1_i;
297:             }
298:             else if ((tau_1 >= tr->gamma1_i) && (tau_1 < 1.0) && 
299:                      ((tau_2 < tr->gamma1_i) || (tau_2 >= 1.0))) {
300:               tau = tau_1;
301:             }
302:             else if ((tau_2 >= tr->gamma1_i) && (tau_2 < 1.0) && 
303:                      ((tau_1 < tr->gamma1_i) || (tau_1 >= 1.0))) {
304:               tau = tau_2;
305:             }
306:             else {
307:               tau = tau_max;
308:             }
309:           }
310:         }
311:         tao->trust = tau * tao->trust;
312:       }
313:   
314:       if (fmin < f) {
315:         f = fmin;
316:         VecAXPY(tao->solution, sigma, tao->gradient); 
317:         TaoComputeGradient(tao,tao->solution, tao->gradient); 
318:         
319:         VecNorm(tao->gradient, NORM_2, &gnorm); 

321:         if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) {
322:           SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
323:         }
324:         needH = 1;

326:         TaoMonitor(tao, iter, f, gnorm, 0.0, 1.0, &reason); 
327:         if (reason != TAO_CONTINUE_ITERATING) {
328:           return(0);
329:         }
330:       }
331:     }
332:     tao->trust = PetscMax(tao->trust, max_radius);

334:     /*  Modify the radius if it is too large or small */
335:     tao->trust = PetscMax(tao->trust, tr->min_radius);
336:     tao->trust = PetscMin(tao->trust, tr->max_radius);
337:     break;

339:   default:
340:     /*  Norm of the first direction will initialize radius */
341:     tao->trust = 0.0;
342:     break;
343:   }

345:   /* Set initial scaling for the BFGS preconditioner 
346:      This step is done after computing the initial trust-region radius
347:      since the function value may have decreased */
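  /*  The choice delta = 2 |f| / ||g||^2 is a heuristic: if the Hessian were
      (1/delta) I, the full model step along -g would decrease the objective
      by 0.5*delta*||g||^2 = |f|  (assumed rationale; the algorithm only
      requires a positive initial scaling). */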
348:   if (NTR_PC_BFGS == tr->pc_type) {
349:     if (f != 0.0) {
350:       delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
351:     }
352:     else {
353:       delta = 2.0 / (gnorm*gnorm);
354:     }
355:     MatLMVMSetDelta(tr->M,delta); 
356:   }

358:   /* Have not converged; continue with Newton method */
359:   while (reason == TAO_CONTINUE_ITERATING) {
360:     ++iter;

362:     /* Compute the Hessian */
363:     if (needH) {
364:       TaoComputeHessian(tao, tao->solution, &tao->hessian, &tao->hessian_pre, &matflag); 
365:       needH = 0;
366:     }

368:     if (NTR_PC_BFGS == tr->pc_type) {
369:       if (BFGS_SCALE_AHESS == tr->bfgs_scale_type) {
370:         /* Obtain diagonal for the bfgs preconditioner */
371:         MatGetDiagonal(tao->hessian, tr->Diag); 
372:         VecAbs(tr->Diag); 
373:         VecReciprocal(tr->Diag); 
374:         MatLMVMSetScale(tr->M,tr->Diag); 
375:       }

377:       /* Update the limited memory preconditioner */
378:       MatLMVMUpdate(tr->M, tao->solution, tao->gradient); 
379:       ++bfgsUpdates;
380:     }

382:     while (reason == TAO_CONTINUE_ITERATING) {
383:       KSPSetOperators(tao->ksp, tao->hessian, tao->hessian_pre, matflag); 
384:       
385:       /* Solve the trust region subproblem */
386:       if (NTR_KSP_NASH == tr->ksp_type) {
387:         KSPNASHSetRadius(tao->ksp,tao->trust); 
388:         KSPSolve(tao->ksp, tao->gradient, tao->stepdirection); 
389:         KSPGetIterationNumber(tao->ksp,&its); 
390:         tao->ksp_its+=its;
391:         KSPNASHGetNormD(tao->ksp, &norm_d); 
392:       } else if (NTR_KSP_STCG == tr->ksp_type) {
393:         KSPSTCGSetRadius(tao->ksp,tao->trust); 
394:         KSPSolve(tao->ksp, tao->gradient, tao->stepdirection); 
395:         KSPGetIterationNumber(tao->ksp,&its); 
396:         tao->ksp_its+=its;
397:         KSPSTCGGetNormD(tao->ksp, &norm_d); 
398:       } else { /* NTR_KSP_GLTR */
399:         KSPGLTRSetRadius(tao->ksp,tao->trust); 
400:         KSPSolve(tao->ksp, tao->gradient, tao->stepdirection); 
401:         KSPGetIterationNumber(tao->ksp,&its); 
402:         tao->ksp_its+=its;
403:         KSPGLTRGetNormD(tao->ksp, &norm_d); 
404:       }

406:       if (0.0 == tao->trust) {
407:         /* Radius was uninitialized; use the norm of the direction */
408:         if (norm_d > 0.0) {
409:           tao->trust = norm_d;

411:           /* Modify the radius if it is too large or small */
412:           tao->trust = PetscMax(tao->trust, tr->min_radius);
413:           tao->trust = PetscMin(tao->trust, tr->max_radius);
414:         }
415:         else {
416:           /* The direction was bad; set radius to default value and re-solve 
417:              the trust-region subproblem to get a direction */
418:           tao->trust = tao->trust0;

420:           /* Modify the radius if it is too large or small */
421:           tao->trust = PetscMax(tao->trust, tr->min_radius);
422:           tao->trust = PetscMin(tao->trust, tr->max_radius);
423:           
424:           if (NTR_KSP_NASH == tr->ksp_type) {
425:             KSPNASHSetRadius(tao->ksp,tao->trust); 
426:             KSPSolve(tao->ksp, tao->gradient, tao->stepdirection); 
427:             KSPGetIterationNumber(tao->ksp,&its); 
428:             tao->ksp_its+=its;
429:             KSPNASHGetNormD(tao->ksp, &norm_d); 
430:           } else if (NTR_KSP_STCG == tr->ksp_type) {
431:             KSPSTCGSetRadius(tao->ksp,tao->trust); 
432:             KSPSolve(tao->ksp, tao->gradient, tao->stepdirection); 
433:             KSPGetIterationNumber(tao->ksp,&its); 
434:             tao->ksp_its+=its;
435:             KSPSTCGGetNormD(tao->ksp, &norm_d); 
436:           } else { /* NTR_KSP_GLTR */
437:             KSPGLTRSetRadius(tao->ksp,tao->trust); 
438:             KSPSolve(tao->ksp, tao->gradient, tao->stepdirection); 
439:             KSPGetIterationNumber(tao->ksp,&its); 
440:             tao->ksp_its+=its;
441:             KSPGLTRGetNormD(tao->ksp, &norm_d); 
442:           }

444:           if (norm_d == 0.0) {
445:             SETERRQ(PETSC_COMM_SELF,1, "Initial direction zero");
446:           }
447:         }
448:       }
449:       VecScale(tao->stepdirection, -1.0); 
450:       KSPGetConvergedReason(tao->ksp, &ksp_reason); 
451:       if ((KSP_DIVERGED_INDEFINITE_PC == ksp_reason) &&
452:           (NTR_PC_BFGS == tr->pc_type) && (bfgsUpdates > 1)) {
453:         /* Preconditioner is numerically indefinite; reset the
454:            approximation if using BFGS preconditioning. */
455:   
456:         if (f != 0.0) {
457:           delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
458:         }
459:         else {
460:           delta = 2.0 / (gnorm*gnorm);
461:         }
462:         MatLMVMSetDelta(tr->M, delta); 
463:         MatLMVMReset(tr->M); 
464:         MatLMVMUpdate(tr->M, tao->solution, tao->gradient); 
465:         bfgsUpdates = 1;
466:       }

468:       if (NTR_UPDATE_REDUCTION == tr->update_type) {
469:         /* Get predicted reduction */
470:         if (NTR_KSP_NASH == tr->ksp_type) {
471:           KSPNASHGetObjFcn(tao->ksp,&prered); 
472:         } else if (NTR_KSP_STCG == tr->ksp_type) {
473:           KSPSTCGGetObjFcn(tao->ksp,&prered); 
474:         } else { /* gltr */
475:           KSPGLTRGetObjFcn(tao->ksp,&prered); 
476:         }
477:         
478:         if (prered >= 0.0) {
479:           /* The predicted reduction has the wrong sign.  This cannot
480:              happen in infinite precision arithmetic.  Step should
481:              be rejected! */
482:           tao->trust = tr->alpha1 * PetscMin(tao->trust, norm_d);
483:         }
484:         else {
485:           /* Compute trial step and function value */
486:           VecCopy(tao->solution,tr->W); 
487:           VecAXPY(tr->W, 1.0, tao->stepdirection); 
488:           TaoComputeObjective(tao, tr->W, &ftrial); 

490:           if (PetscIsInfOrNanReal(ftrial)) {
491:             tao->trust = tr->alpha1 * PetscMin(tao->trust, norm_d);
492:           } else {
493:             /* Compute the actual reduction */
494:             actred = f - ftrial;
495:             prered = -prered;
496:             if ((PetscAbsScalar(actred) <= tr->epsilon) && 
497:                 (PetscAbsScalar(prered) <= tr->epsilon)) {
498:               kappa = 1.0;
499:             }
500:             else {
501:               kappa = actred / prered;
502:             }
503:             
504:             /* Accept or reject the step and update radius */
505:             if (kappa < tr->eta1) {
506:               /* Reject the step */
507:               tao->trust = tr->alpha1 * PetscMin(tao->trust, norm_d);
508:             } 
509:             else {
510:               /* Accept the step */
511:               if (kappa < tr->eta2) { 
512:                 /* Marginal bad step */
513:                 tao->trust = tr->alpha2 * PetscMin(tao->trust, norm_d);
514:               }
515:               else if (kappa < tr->eta3) {
516:                 /* Reasonable step */
517:                 tao->trust = tr->alpha3 * tao->trust;
518:               }
519:               else if (kappa < tr->eta4) { 
520:                 /* Good step */
521:                 tao->trust = PetscMax(tr->alpha4 * norm_d, tao->trust);
522:               }
523:               else {
524:                 /* Very good step */
525:                 tao->trust = PetscMax(tr->alpha5 * norm_d, tao->trust);
526:               }
527:               break;
528:             }
529:           }
530:         } 
531:       }
532:       else {
533:         /* Get predicted reduction */
534:         if (NTR_KSP_NASH == tr->ksp_type) {
535:           KSPNASHGetObjFcn(tao->ksp,&prered); 
536:         } else if (NTR_KSP_STCG == tr->ksp_type) {
537:           KSPSTCGGetObjFcn(tao->ksp,&prered); 
538:         } else { /* gltr */
539:           KSPGLTRGetObjFcn(tao->ksp,&prered); 
540:         }

542:         if (prered >= 0.0) {
543:           /* The predicted reduction has the wrong sign.  This cannot
544:              happen in infinite precision arithmetic.  Step should
545:              be rejected! */
546:           tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
547:         }
548:         else {
549:           VecCopy(tao->solution, tr->W); 
550:           VecAXPY(tr->W, 1.0, tao->stepdirection); 
551:           TaoComputeObjective(tao, tr->W, &ftrial); 
552:           if (PetscIsInfOrNanReal(ftrial)) {
553:             tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
554:           }
555:           else {
556:             VecDot(tao->gradient, tao->stepdirection, &beta); 
557:             actred = f - ftrial;
558:             prered = -prered;
559:             if ((PetscAbsScalar(actred) <= tr->epsilon) && 
560:                 (PetscAbsScalar(prered) <= tr->epsilon)) {
561:               kappa = 1.0;
562:             }
563:             else {
564:               kappa = actred / prered;
565:             }

567:             tau_1 = tr->theta * beta / (tr->theta * beta - (1.0 - tr->theta) * prered + actred);
568:             tau_2 = tr->theta * beta / (tr->theta * beta + (1.0 + tr->theta) * prered - actred);
569:             tau_min = PetscMin(tau_1, tau_2);
570:             tau_max = PetscMax(tau_1, tau_2);

572:             if (kappa >= 1.0 - tr->mu1) {
573:               /* Great agreement; accept step and update radius */
574:               if (tau_max < 1.0) {
575:                 tao->trust = PetscMax(tao->trust, tr->gamma3 * norm_d);
576:               }
577:               else if (tau_max > tr->gamma4) {
578:                 tao->trust = PetscMax(tao->trust, tr->gamma4 * norm_d);
579:               }
580:               else {
581:                 tao->trust = PetscMax(tao->trust, tau_max * norm_d);
582:               }
583:               break;
584:             }
585:             else if (kappa >= 1.0 - tr->mu2) {
586:               /* Good agreement */

588:               if (tau_max < tr->gamma2) {
589:                 tao->trust = tr->gamma2 * PetscMin(tao->trust, norm_d);
590:               }
591:               else if (tau_max > tr->gamma3) {
592:                 tao->trust = PetscMax(tao->trust, tr->gamma3 * norm_d);
593:               }
594:               else if (tau_max < 1.0) {
595:                 tao->trust = tau_max * PetscMin(tao->trust, norm_d);
596:               }
597:               else {
598:                 tao->trust = PetscMax(tao->trust, tau_max * norm_d);
599:               }
600:               break;
601:             }
602:             else {
603:               /* Not good agreement */
604:               if (tau_min > 1.0) {
605:                 tao->trust = tr->gamma2 * PetscMin(tao->trust, norm_d);
606:               }
607:               else if (tau_max < tr->gamma1) {
608:                 tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
609:               }
610:               else if ((tau_min < tr->gamma1) && (tau_max >= 1.0)) {
611:                 tao->trust = tr->gamma1 * PetscMin(tao->trust, norm_d);
612:               }
613:               else if ((tau_1 >= tr->gamma1) && (tau_1 < 1.0) && 
614:                        ((tau_2 < tr->gamma1) || (tau_2 >= 1.0))) {
615:                 tao->trust = tau_1 * PetscMin(tao->trust, norm_d);
616:               }
617:               else if ((tau_2 >= tr->gamma1) && (tau_2 < 1.0) && 
618:                        ((tau_1 < tr->gamma1) || (tau_1 >= 1.0))) {
619:                 tao->trust = tau_2 * PetscMin(tao->trust, norm_d);
620:               }
621:               else {
622:                 tao->trust = tau_max * PetscMin(tao->trust, norm_d);
623:               }
624:             }
625:           }
626:         }
627:       }

629:       /* The step computed was not good and the radius was decreased.
630:          Monitor the radius so the iteration can terminate when it becomes too small. */
631:       TaoMonitor(tao, iter, f, gnorm, 0.0, tao->trust, &reason); 
632:     }

634:     /* The radius may have been increased; modify if it is too large */
635:     tao->trust = PetscMin(tao->trust, tr->max_radius);

637:     if (reason == TAO_CONTINUE_ITERATING) {
638:       VecCopy(tr->W, tao->solution); 
639:       f = ftrial;
640:       TaoComputeGradient(tao, tao->solution, tao->gradient);
641:       VecNorm(tao->gradient, NORM_2, &gnorm); 
642:       if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) {
643:         SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
644:       }
645:       needH = 1;
646:       TaoMonitor(tao, iter, f, gnorm, 0.0, tao->trust, &reason); 
647:     }
648:   }
649:   return(0);
650: }

652: /*------------------------------------------------------------*/
655: static PetscErrorCode TaoSetUp_NTR(TaoSolver tao)
656: {
657:   TAO_NTR *tr = (TAO_NTR *)tao->data;


662:   if (!tao->gradient) {VecDuplicate(tao->solution, &tao->gradient); }
663:   if (!tao->stepdirection) {VecDuplicate(tao->solution, &tao->stepdirection); }
664:   if (!tr->W) {VecDuplicate(tao->solution, &tr->W); }  

666:   tr->Diag = 0;
667:   tr->M = 0;


670:   return(0);
671: }

673: /*------------------------------------------------------------*/
676: static PetscErrorCode TaoDestroy_NTR(TaoSolver tao)
677: {
678:   TAO_NTR *tr = (TAO_NTR *)tao->data;

682:   if (tao->setupcalled) {
683:     VecDestroy(&tr->W); 
684:   }
685:   if (tr->M) {
686:     MatDestroy(&tr->M); 
687:     tr->M = PETSC_NULL;
688:   }
689:   if (tr->Diag) {
690:     VecDestroy(&tr->Diag); 
691:     tr->Diag = PETSC_NULL;
692:   }
693:   PetscFree(tao->data); 
694:   tao->data = PETSC_NULL;

696:   return(0);
697: }

699: /*------------------------------------------------------------*/
702: static PetscErrorCode TaoSetFromOptions_NTR(TaoSolver tao)
703: {
704:   TAO_NTR *tr = (TAO_NTR *)tao->data;

708:   PetscOptionsHead("Newton trust region method for unconstrained optimization"); 
709:   PetscOptionsEList("-tao_ntr_ksp_type", "ksp type", "", NTR_KSP, NTR_KSP_TYPES, NTR_KSP[tr->ksp_type], &tr->ksp_type, 0); 
710:   PetscOptionsEList("-tao_ntr_pc_type", "pc type", "", NTR_PC, NTR_PC_TYPES, NTR_PC[tr->pc_type], &tr->pc_type, 0); 
711:   PetscOptionsEList("-tao_ntr_bfgs_scale_type", "bfgs scale type", "", BFGS_SCALE, BFGS_SCALE_TYPES, BFGS_SCALE[tr->bfgs_scale_type], &tr->bfgs_scale_type, 0); 
712:   PetscOptionsEList("-tao_ntr_init_type", "tao->trust initialization type", "", NTR_INIT, NTR_INIT_TYPES, NTR_INIT[tr->init_type], &tr->init_type, 0); 
713:   PetscOptionsEList("-tao_ntr_update_type", "radius update type", "", NTR_UPDATE, NTR_UPDATE_TYPES, NTR_UPDATE[tr->update_type], &tr->update_type, 0); 
714:   PetscOptionsReal("-tao_ntr_eta1", "step is unsuccessful if actual reduction < eta1 * predicted reduction", "", tr->eta1, &tr->eta1, 0); 
715:   PetscOptionsReal("-tao_ntr_eta2", "", "", tr->eta2, &tr->eta2, 0); 
716:   PetscOptionsReal("-tao_ntr_eta3", "", "", tr->eta3, &tr->eta3, 0); 
717:   PetscOptionsReal("-tao_ntr_eta4", "", "", tr->eta4, &tr->eta4, 0); 
718:   PetscOptionsReal("-tao_ntr_alpha1", "", "", tr->alpha1, &tr->alpha1, 0); 
719:   PetscOptionsReal("-tao_ntr_alpha2", "", "", tr->alpha2, &tr->alpha2, 0); 
720:   PetscOptionsReal("-tao_ntr_alpha3", "", "", tr->alpha3, &tr->alpha3, 0); 
721:   PetscOptionsReal("-tao_ntr_alpha4", "", "", tr->alpha4, &tr->alpha4, 0); 
722:   PetscOptionsReal("-tao_ntr_alpha5", "", "", tr->alpha5, &tr->alpha5, 0); 
723:   PetscOptionsReal("-tao_ntr_mu1", "", "", tr->mu1, &tr->mu1, 0); 
724:   PetscOptionsReal("-tao_ntr_mu2", "", "", tr->mu2, &tr->mu2, 0); 
725:   PetscOptionsReal("-tao_ntr_gamma1", "", "", tr->gamma1, &tr->gamma1, 0); 
726:   PetscOptionsReal("-tao_ntr_gamma2", "", "", tr->gamma2, &tr->gamma2, 0); 
727:   PetscOptionsReal("-tao_ntr_gamma3", "", "", tr->gamma3, &tr->gamma3, 0); 
728:   PetscOptionsReal("-tao_ntr_gamma4", "", "", tr->gamma4, &tr->gamma4, 0); 
729:   PetscOptionsReal("-tao_ntr_theta", "", "", tr->theta, &tr->theta, 0); 
730:   PetscOptionsReal("-tao_ntr_mu1_i", "", "", tr->mu1_i, &tr->mu1_i, 0); 
731:   PetscOptionsReal("-tao_ntr_mu2_i", "", "", tr->mu2_i, &tr->mu2_i, 0); 
732:   PetscOptionsReal("-tao_ntr_gamma1_i", "", "", tr->gamma1_i, &tr->gamma1_i, 0); 
733:   PetscOptionsReal("-tao_ntr_gamma2_i", "", "", tr->gamma2_i, &tr->gamma2_i, 0); 
734:   PetscOptionsReal("-tao_ntr_gamma3_i", "", "", tr->gamma3_i, &tr->gamma3_i, 0); 
735:   PetscOptionsReal("-tao_ntr_gamma4_i", "", "", tr->gamma4_i, &tr->gamma4_i, 0); 
736:   PetscOptionsReal("-tao_ntr_theta_i", "", "", tr->theta_i, &tr->theta_i, 0); 
737:   PetscOptionsReal("-tao_ntr_min_radius", "lower bound on initial trust-region radius", "", tr->min_radius, &tr->min_radius, 0); 
738:   PetscOptionsReal("-tao_ntr_max_radius", "upper bound on trust-region radius", "", tr->max_radius, &tr->max_radius, 0); 
739:   PetscOptionsReal("-tao_ntr_epsilon", "tolerance used when computing actual and predicted reduction", "", tr->epsilon, &tr->epsilon, 0); 
740:   PetscOptionsTail(); 
741:   KSPSetFromOptions(tao->ksp); 
742:   return(0);
743: }
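/*  The real-valued parameters registered above can be adjusted the same way
    at run time, e.g. (hypothetical command line, executable name assumed):

        ./myprog -tao_ntr_eta1 1.0e-5 -tao_ntr_min_radius 1.0e-8 -tao_ntr_max_radius 1.0e6 -tao_ntr_epsilon 1.0e-8
*/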

745: /*------------------------------------------------------------*/
748: static PetscErrorCode TaoView_NTR(TaoSolver tao, PetscViewer viewer)
749: {
750:   TAO_NTR *tr = (TAO_NTR *)tao->data;
752:   PetscInt nrejects;
753:   PetscBool isascii;
755:   PetscTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&isascii);
756:   if (isascii) {
757:     PetscViewerASCIIPushTab(viewer); 
758:     if (NTR_PC_BFGS == tr->pc_type && tr->M) {
759:       MatLMVMGetRejects(tr->M, &nrejects); 
760:       PetscViewerASCIIPrintf(viewer, "Rejected matrix updates: %D\n", nrejects); 
761:     }
762:     PetscViewerASCIIPopTab(viewer); 

764:   } else {
765:     SETERRQ1(((PetscObject)tao)->comm,PETSC_ERR_SUP,"Viewer type %s not supported for TAO NTR",((PetscObject)viewer)->type_name);
766:   }
767:   return(0);
768: }

770: /*------------------------------------------------------------*/
774: PetscErrorCode TaoCreate_NTR(TaoSolver tao)
775: {
776:   TAO_NTR *tr;


781:   PetscNewLog(tao, TAO_NTR, &tr); 

783:   tao->ops->setup = TaoSetUp_NTR;
784:   tao->ops->solve = TaoSolve_NTR;
785:   tao->ops->view = TaoView_NTR;
786:   tao->ops->setfromoptions = TaoSetFromOptions_NTR;
787:   tao->ops->destroy = TaoDestroy_NTR;
788:   
789:   tao->max_it = 50;
790:   tao->fatol = 1e-10;
791:   tao->frtol = 1e-10;
792:   tao->data = (void*)tr;

794:   tao->trust0 = 100.0;

796:   /*  Standard trust region update parameters */
797:   tr->eta1 = 1.0e-4;
798:   tr->eta2 = 0.25;
799:   tr->eta3 = 0.50;
800:   tr->eta4 = 0.90;

802:   tr->alpha1 = 0.25;
803:   tr->alpha2 = 0.50;
804:   tr->alpha3 = 1.00;
805:   tr->alpha4 = 2.00;
806:   tr->alpha5 = 4.00;

808:   /*  Interpolation parameters */
809:   tr->mu1_i = 0.35;
810:   tr->mu2_i = 0.50;

812:   tr->gamma1_i = 0.0625;
813:   tr->gamma2_i = 0.50;
814:   tr->gamma3_i = 2.00;
815:   tr->gamma4_i = 5.00;

817:   tr->theta_i = 0.25;

819:   /*  Interpolation trust region update parameters */
820:   tr->mu1 = 0.10;
821:   tr->mu2 = 0.50;

823:   tr->gamma1 = 0.25;
824:   tr->gamma2 = 0.50;
825:   tr->gamma3 = 2.00;
826:   tr->gamma4 = 4.00;

828:   tr->theta = 0.05;

830:   tr->min_radius = 1.0e-10;
831:   tr->max_radius = 1.0e10;
832:   tr->epsilon = 1.0e-6;

834:   tr->ksp_type        = NTR_KSP_STCG;
835:   tr->pc_type         = NTR_PC_BFGS;
836:   tr->bfgs_scale_type = BFGS_SCALE_AHESS;
837:   tr->init_type       = NTR_INIT_INTERPOLATION;
838:   tr->update_type     = NTR_UPDATE_REDUCTION;


841:   /* Set linear solver to default for trust region */
842:   KSPCreate(((PetscObject)tao)->comm, &tao->ksp); 

844:   return(0);


847: }
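/*  A minimal driver sketch, with assumed TAO 2.x-era API names; x, H, user,
    FormFunctionGradient, and FormHessian are placeholders supplied by the
    application, and the only call specific to this file is selecting the
    "tao_ntr" type:

      TaoSolver tao;
      TaoCreate(PETSC_COMM_WORLD, &tao);
      TaoSetType(tao, "tao_ntr");
      TaoSetInitialVector(tao, x);
      TaoSetObjectiveAndGradientRoutine(tao, FormFunctionGradient, &user);
      TaoSetHessianRoutine(tao, H, H, FormHessian, &user);
      TaoSetFromOptions(tao);
      TaoSolve(tao);
      TaoDestroy(&tao);
*/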


853: static PetscErrorCode MatLMVMSolveShell(PC pc, Vec b, Vec x) 
854: {
856:     Mat M;
861:     PCShellGetContext(pc,(void**)&M); 
862:     MatLMVMSolve(M, b, x); 
863:     return(0);
864: }