Actual source code: lmvm.c
#include "taolinesearch.h"
#include "src/matrix/lmvmmat.h"
#include "lmvm.h"

#define LMVM_BFGS            0
#define LMVM_SCALED_GRADIENT 1
#define LMVM_GRADIENT        2
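
/* Step types: each line-search failure falls back one level, from the
   full BFGS direction to the scaled gradient used on a fresh (or reset)
   approximation, and finally to the unscaled gradient. */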
static PetscErrorCode TaoSolve_LMVM(TaoSolver tao)
{
  TAO_LMVM *lmP = (TAO_LMVM *)tao->data;

  PetscReal f, fold, gdx, gnorm;
  PetscReal step = 1.0;
  PetscReal delta;

  PetscInt stepType;
  PetscInt iter = 0;
  TaoSolverTerminationReason reason = TAO_CONTINUE_ITERATING;
  TaoLineSearchTerminationReason ls_status = TAOLINESEARCH_CONTINUE_ITERATING;
  if (tao->XL || tao->XU || tao->ops->computebounds) {
    PetscPrintf(((PetscObject)tao)->comm,"WARNING: Variable bounds have been set but will be ignored by lmvm algorithm\n");
  }

  /* Check convergence criteria */
  TaoComputeObjectiveAndGradient(tao, tao->solution, &f, tao->gradient);
  VecNorm(tao->gradient,NORM_2,&gnorm);
  if (PetscIsInfOrNanReal(f) || PetscIsInfOrNanReal(gnorm)) {
    SETERRQ(PETSC_COMM_SELF,1, "User provided compute function generated Inf or NaN");
  }

  TaoMonitor(tao, iter, f, gnorm, 0.0, step, &reason);
  if (reason != TAO_CONTINUE_ITERATING) {
    return(0);
  }
  /* Set initial scaling for the function */
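  /* Heuristic: with initial inverse-Hessian scaling H0 = delta*I and
     delta = 2|f|/||g||^2, a quadratic model with that curvature predicts
     the objective reaching roughly zero after the first step.  This
     guards against a wildly scaled initial direction. */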
  if (f != 0.0) {
    delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
  }
  else {
    delta = 2.0 / (gnorm*gnorm);
  }
  MatLMVMSetDelta(lmP->M,delta);

  /* Set counter for gradient/reset steps */
  lmP->bfgs = 0;
  lmP->sgrad = 0;
  lmP->grad = 0;
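  /* These counters are what TaoView_LMVM reports (e.g. via -tao_view). */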

  /* Have not converged; continue with Newton method */
  while (reason == TAO_CONTINUE_ITERATING) {
    /* Compute direction */
    MatLMVMUpdate(lmP->M,tao->solution,tao->gradient);
    MatLMVMSolve(lmP->M, tao->gradient, lmP->D);
    ++lmP->bfgs;

    /* Check for success (descent direction) */
    VecDot(lmP->D, tao->gradient, &gdx);
    if ((gdx <= 0.0) || PetscIsInfOrNanReal(gdx)) {
      /* The direction is not a descent direction or contains NaN.
         We can assert lmP->bfgs > 1 in this case because the first
         solve produces the scaled gradient direction, which is
         guaranteed to be a descent direction.

         Use the (scaled) steepest descent direction instead. */
      ++lmP->grad;

      if (f != 0.0) {
        delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
      }
      else {
        delta = 2.0 / (gnorm*gnorm);
      }
      MatLMVMSetDelta(lmP->M, delta);
      MatLMVMReset(lmP->M);
      MatLMVMUpdate(lmP->M, tao->solution, tao->gradient);
      MatLMVMSolve(lmP->M, tao->gradient, lmP->D);
      /* After a reset the direction is a scaled gradient step and
         cannot contain NaN, so there is no need to recheck it. */

      lmP->bfgs = 1;
      ++lmP->sgrad;
      stepType = LMVM_SCALED_GRADIENT;
    }
    else {
      if (1 == lmP->bfgs) {
        /* The first BFGS direction is always the scaled gradient */
        ++lmP->sgrad;
        stepType = LMVM_SCALED_GRADIENT;
      }
      else {
        ++lmP->bfgs;
        stepType = LMVM_BFGS;
      }
    }
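    /* The solve applied the approximate inverse Hessian to the gradient,
       so D currently holds H*g; negate it to get the search direction. */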
    VecScale(lmP->D, -1.0);

    /* Perform the linesearch */
    fold = f;
    VecCopy(tao->solution, lmP->Xold);
    VecCopy(tao->gradient, lmP->Gold);

    TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, lmP->D, &step, &ls_status);
    TaoAddLineSearchCounts(tao);

    while (ls_status != TAOLINESEARCH_SUCCESS &&
           ls_status != TAOLINESEARCH_SUCCESS_USER &&
           stepType != LMVM_GRADIENT) {
      /* Linesearch failed; reset factors and use scaled gradient step */
      f = fold;
      VecCopy(lmP->Xold, tao->solution);
      VecCopy(lmP->Gold, tao->gradient);

      switch(stepType) {
      case LMVM_BFGS:
        /* Failed to obtain acceptable iterate with BFGS step;
           attempt to use the scaled gradient direction */
        if (f != 0.0) {
          delta = 2.0 * PetscAbsScalar(f) / (gnorm*gnorm);
        }
        else {
          delta = 2.0 / (gnorm*gnorm);
        }
        MatLMVMSetDelta(lmP->M, delta);
        MatLMVMReset(lmP->M);
        MatLMVMUpdate(lmP->M, tao->solution, tao->gradient);
        MatLMVMSolve(lmP->M, tao->gradient, lmP->D);
        /* After a reset the direction is a scaled gradient step and
           cannot contain NaN, so there is no need to recheck it. */

        lmP->bfgs = 1;
        ++lmP->sgrad;
        stepType = LMVM_SCALED_GRADIENT;
        break;
      case LMVM_SCALED_GRADIENT:
        /* The scaled gradient step did not produce a new iterate;
           attempt to use the gradient direction.
           Need to make sure we are not using a different diagonal scaling */
        MatLMVMSetDelta(lmP->M, 1.0);
        MatLMVMReset(lmP->M);
        MatLMVMUpdate(lmP->M, tao->solution, tao->gradient);
        MatLMVMSolve(lmP->M, tao->gradient, lmP->D);

        lmP->bfgs = 1;
        ++lmP->grad;
        stepType = LMVM_GRADIENT;
        break;
      }
      VecScale(lmP->D, -1.0);

      /* Perform the linesearch */
      TaoLineSearchApply(tao->linesearch, tao->solution, &f, tao->gradient, lmP->D, &step, &ls_status);
      TaoAddLineSearchCounts(tao);
    }
    if (ls_status != TAOLINESEARCH_SUCCESS &&
        ls_status != TAOLINESEARCH_SUCCESS_USER) {
      /* Failed to find an improving point */
      f = fold;
      VecCopy(lmP->Xold, tao->solution);
      VecCopy(lmP->Gold, tao->gradient);
      step = 0.0;
      reason = TAO_DIVERGED_LS_FAILURE;
      tao->reason = TAO_DIVERGED_LS_FAILURE;
    }

    /* Check for termination */
    VecNorm(tao->gradient, NORM_2, &gnorm);
    iter++;
    TaoMonitor(tao, iter, f, gnorm, 0.0, step, &reason);
  }
  return(0);
}
static PetscErrorCode TaoSetUp_LMVM(TaoSolver tao)
{
  TAO_LMVM *lmP = (TAO_LMVM *)tao->data;
  PetscInt n,N;

  /* Existence of tao->solution checked in TaoSetUp() */
  if (!tao->gradient) {VecDuplicate(tao->solution,&tao->gradient);}
  if (!tao->stepdirection) {VecDuplicate(tao->solution,&tao->stepdirection);}
  if (!lmP->D) {VecDuplicate(tao->solution,&lmP->D);}
  if (!lmP->Xold) {VecDuplicate(tao->solution,&lmP->Xold);}
  if (!lmP->Gold) {VecDuplicate(tao->solution,&lmP->Gold);}

  /* Create matrix for the limited memory approximation */
  VecGetLocalSize(tao->solution,&n);
  VecGetSize(tao->solution,&N);
  MatCreateLMVM(((PetscObject)tao)->comm,n,N,&lmP->M);
  MatLMVMAllocateVectors(lmP->M,tao->solution);

  return(0);
}
/* ---------------------------------------------------------- */
static PetscErrorCode TaoDestroy_LMVM(TaoSolver tao)
{
  TAO_LMVM *lmP = (TAO_LMVM *)tao->data;

  if (tao->setupcalled) {
    VecDestroy(&lmP->Xold);
    VecDestroy(&lmP->Gold);
    VecDestroy(&lmP->D);
    MatDestroy(&lmP->M);
  }
  PetscFree(tao->data);
  tao->data = PETSC_NULL;

  return(0);
}
/*------------------------------------------------------------*/
static PetscErrorCode TaoSetFromOptions_LMVM(TaoSolver tao)
{
  PetscOptionsHead("Limited-memory variable-metric method for unconstrained optimization");
  TaoLineSearchSetFromOptions(tao->linesearch);
  PetscOptionsTail();
  return(0);
}
/*------------------------------------------------------------*/
static PetscErrorCode TaoView_LMVM(TaoSolver tao, PetscViewer viewer)
{
  TAO_LMVM *lm = (TAO_LMVM *)tao->data;
  PetscBool isascii;

  PetscTypeCompare((PetscObject)viewer, PETSCVIEWERASCII, &isascii);
  if (isascii) {
    PetscViewerASCIIPushTab(viewer);
    PetscViewerASCIIPrintf(viewer, "BFGS steps: %D\n", lm->bfgs);
    PetscViewerASCIIPrintf(viewer, "Scaled gradient steps: %D\n", lm->sgrad);
    PetscViewerASCIIPrintf(viewer, "Gradient steps: %D\n", lm->grad);
    PetscViewerASCIIPopTab(viewer);
  } else {
    SETERRQ1(((PetscObject)tao)->comm,PETSC_ERR_SUP,"Viewer type %s not supported for TAO LMVM",((PetscObject)viewer)->type_name);
  }
  return(0);
}
/* ---------------------------------------------------------- */
PetscErrorCode TaoCreate_LMVM(TaoSolver tao)
{
  TAO_LMVM *lmP;
  const char *morethuente_type = TAOLINESEARCH_MT;
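  /* TAOLINESEARCH_MT is the More'-Thuente line search, which enforces
     the strong Wolfe conditions and so keeps the curvature condition
     needed for a stable BFGS update. */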

  tao->ops->setup          = TaoSetUp_LMVM;
  tao->ops->solve          = TaoSolve_LMVM;
  tao->ops->view           = TaoView_LMVM;
  tao->ops->setfromoptions = TaoSetFromOptions_LMVM;
  tao->ops->destroy        = TaoDestroy_LMVM;

  PetscNewLog(tao,TAO_LMVM, &lmP);
  lmP->D    = 0;
  lmP->M    = 0;
  lmP->Xold = 0;
  lmP->Gold = 0;

  tao->data      = (void*)lmP;
  tao->max_it    = 2000;
  tao->max_funcs = 4000;
  tao->fatol     = 1e-4;
  tao->frtol     = 1e-4;
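  /* Defaults only; these can be overridden at run time with the usual
     TAO options (e.g. -tao_max_it, -tao_max_funcs, -tao_fatol, -tao_frtol). */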

  TaoLineSearchCreate(((PetscObject)tao)->comm,&tao->linesearch);
  TaoLineSearchSetType(tao->linesearch,morethuente_type);
  TaoLineSearchUseTaoSolverRoutines(tao->linesearch,tao);

  return(0);
}
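
A minimal driver sketch, for orientation only (not part of lmvm.c): it assumes the TAO 2.x API used above, the registered type name "tao_lmvm", and a hypothetical user callback FormFunctionGradient whose signature matches what TaoSetObjectiveAndGradientRoutine expects.

#include "taosolver.h"

/* Hypothetical user callback: fill in *f and G from X. */
static PetscErrorCode FormFunctionGradient(TaoSolver tao, Vec X, PetscReal *f, Vec G, void *ctx)
{
  /* ... evaluate the objective and its gradient here ... */
  return(0);
}

int main(int argc, char **argv)
{
  TaoSolver tao;
  Vec       x;

  PetscInitialize(&argc,&argv,(char*)0,0);
  TaoInitialize(&argc,&argv,(char*)0,0);

  VecCreateSeq(PETSC_COMM_SELF,2,&x);   /* two unknowns, as an example */
  VecSet(x,0.0);                        /* starting point */

  TaoCreate(PETSC_COMM_SELF,&tao);
  TaoSetType(tao,"tao_lmvm");           /* selects the solver in this file */
  TaoSetInitialVector(tao,x);
  TaoSetObjectiveAndGradientRoutine(tao,FormFunctionGradient,PETSC_NULL);
  TaoSolve(tao);                        /* runs TaoSolve_LMVM above */

  TaoDestroy(&tao);
  VecDestroy(&x);
  TaoFinalize();
  PetscFinalize();
  return 0;
}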