Actual source code: armijo.c

#include "armijo.h"

#define REPLACE_FIFO 1
#define REPLACE_MRU  2

#define REFERENCE_MAX  1
#define REFERENCE_AVE  2
#define REFERENCE_MEAN 3
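
/*
   These constants select the nonmonotone line search policies used below.
   REPLACE_FIFO overwrites the oldest entry in the history array, while
   REPLACE_MRU overwrites the entry that currently defines the reference
   value (the stored maximum).  REFERENCE_MAX takes the reference value to
   be the maximum stored value, REFERENCE_AVE takes the average of the
   stored values (bounded below by the entry at the current index), and
   REFERENCE_MEAN takes the smaller of the maximum and the mean of the
   last reference value and the entry at the current index.
*/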

static int TaoDestroy_Armijo(TAO_SOLVER tao, void *ctx)
{
  TAO_ARMIJO *ls = (TAO_ARMIJO *)ctx;
  int info;

  TaoFunctionBegin;

  if (ls->memory != TAO_NULL) {
    info = TaoFree(ls->memory); CHKERRQ(info);
    ls->memory = TAO_NULL;
  }
  info = TaoFree(ls); CHKERRQ(info);
  TaoFunctionReturn(0);
}

static int TaoSetOptions_Armijo(TAO_SOLVER tao, void *ctx)
{
  TAO_ARMIJO *ls = (TAO_ARMIJO *)tao->linectx;
  int info;

  TaoFunctionBegin;
  info = TaoOptionsHead("Armijo linesearch options"); CHKERRQ(info);
  info = TaoOptionDouble("-tao_armijo_alpha", "initial reference constant", "", ls->alpha, &ls->alpha, 0); CHKERRQ(info);
  info = TaoOptionDouble("-tao_armijo_beta_inf", "decrease constant one", "", ls->beta_inf, &ls->beta_inf, 0); CHKERRQ(info);
  info = TaoOptionDouble("-tao_armijo_beta", "decrease constant", "", ls->beta, &ls->beta, 0); CHKERRQ(info);
  info = TaoOptionDouble("-tao_armijo_sigma", "acceptance constant", "", ls->sigma, &ls->sigma, 0); CHKERRQ(info);
  info = TaoOptionInt("-tao_armijo_memory_size", "number of historical elements", "", ls->memorySize, &ls->memorySize, 0); CHKERRQ(info);
  info = TaoOptionDouble("-tao_armijo_minimum_step", "minimum acceptable step", "", ls->minimumStep, &ls->minimumStep, 0); CHKERRQ(info);
  info = TaoOptionInt("-tao_projected_armijo_reference_policy", "policy for updating reference value", "", ls->referencePolicy, &ls->referencePolicy, 0); CHKERRQ(info);
  info = TaoOptionInt("-tao_projected_armijo_replacement_policy", "policy for updating memory", "", ls->replacementPolicy, &ls->replacementPolicy, 0); CHKERRQ(info);
  info = TaoOptionsTail(); CHKERRQ(info);
  TaoFunctionReturn(0);
}

static int TaoView_Armijo(TAO_SOLVER tao, void *ctx)
{
  TAO_ARMIJO *ls = (TAO_ARMIJO *)ctx;
  int info;

  TaoFunctionBegin;

  info = TaoPrintDouble(tao, " Armijo linesearch: alpha=%g", ls->alpha); CHKERRQ(info);
  info = TaoPrintDouble(tao, " beta=%g ", ls->beta); CHKERRQ(info);
  info = TaoPrintDouble(tao, "sigma=%g ", ls->sigma); CHKERRQ(info);
  info = TaoPrintDouble(tao, "minstep=%g,", ls->minimumStep); CHKERRQ(info);
  info = TaoPrintInt(tao, "memsize=%d\n", ls->memorySize); CHKERRQ(info);

  TaoFunctionReturn(0);
}

static int TaoApply_PreArmijo(TAO_SOLVER tao, TAO_ARMIJO *ls,
                              double f, double step,
                              double *ref, TaoInt *idx, TaoInt *info2)
{
  int info;
  TaoInt i;

  TaoFunctionBegin;

  *info2 = 0;

  // Check linesearch parameters
  if (step < 0) {
    info = PetscInfo1(tao, "TaoApply_Armijo:Line search error: step (%g) < 0\n", step); CHKERRQ(info);
    *info2 = -1;
  }

  if (ls->alpha < 1) {
    info = PetscInfo1(tao, "TaoApply_Armijo:Line search error: alpha (%g) < 1\n", ls->alpha); CHKERRQ(info);
    *info2 = -2;
  }

  if ((ls->beta <= 0) || (ls->beta >= 1)) {
    info = PetscInfo1(tao, "TaoApply_Armijo:Line search error: beta (%g) invalid\n", ls->beta); CHKERRQ(info);
    *info2 = -3;
  }

  if ((ls->beta_inf <= 0) || (ls->beta_inf >= 1)) {
    info = PetscInfo1(tao, "TaoApply_Armijo:Line search error: beta_inf (%g) invalid\n", ls->beta_inf); CHKERRQ(info);
    *info2 = -4;
  }

  if ((ls->sigma <= 0) || (ls->sigma >= 0.5)) {
    info = PetscInfo1(tao, "TaoApply_Armijo:Line search error: sigma (%g) invalid\n", ls->sigma); CHKERRQ(info);
    *info2 = -5;
  }

  if (ls->minimumStep <= 0) {
    info = PetscInfo1(tao, "TaoApply_Armijo:Line search error: minimum_step (%g) <= 0\n", ls->minimumStep); CHKERRQ(info);
    *info2 = -6;
  }

  if (ls->memorySize < 1) {
    info = PetscInfo1(tao, "TaoApply_Armijo:Line search error: memory_size (%d) < 1\n", ls->memorySize); CHKERRQ(info);
    *info2 = -7;
  }

  if ((ls->referencePolicy != REFERENCE_MAX) &&
      (ls->referencePolicy != REFERENCE_AVE) &&
      (ls->referencePolicy != REFERENCE_MEAN)) {
    info = PetscInfo(tao, "TaoApply_Armijo:Line search error: reference_policy invalid\n"); CHKERRQ(info);
    *info2 = -8;
  }

  if ((ls->replacementPolicy != REPLACE_FIFO) &&
      (ls->replacementPolicy != REPLACE_MRU)) {
    info = PetscInfo(tao, "TaoApply_Armijo:Line search error: replacement_policy invalid\n"); CHKERRQ(info);
    *info2 = -9;
  }

  if (TaoInfOrNaN(f)) {
    info = PetscInfo(tao, "TaoApply_Armijo:Line search error: initial function inf or nan\n"); CHKERRQ(info);
    *info2 = -10;
  }

  if (*info2) {
    TaoFunctionReturn(0);
  }

  // Check to see if the memory has been allocated.  If not, allocate
  // the historical array and populate it with the initial function
  // values.
  if (ls->memory == TAO_NULL) {
    info = TaoMalloc(sizeof(double)*ls->memorySize, &ls->memory); CHKERRQ(info);
    info = PetscLogObjectMemory(tao, sizeof(double)*ls->memorySize); CHKERRQ(info);
  }

  if (tao->iter == 0) {
    // Seed the history with alpha*f so the initial reference value is
    // alpha times the starting function value.
    for (i = 0; i < ls->memorySize; i++) {
      ls->memory[i] = ls->alpha*f;
    }

    ls->current = 0;
    ls->lastReference = ls->memory[0];
  }

  // Calculate reference value (MAX): the largest stored function value
  *ref = ls->memory[0];
  *idx = 0;

  for (i = 1; i < ls->memorySize; i++) {
    if (ls->memory[i] > *ref) {
      *ref = ls->memory[i];
      *idx = i;
    }
  }

  if (ls->referencePolicy == REFERENCE_AVE) {
    // Average of the stored values, bounded below by the current entry
    *ref = 0;
    for (i = 0; i < ls->memorySize; i++) {
      *ref += ls->memory[i];
    }
    *ref = *ref / ls->memorySize;
    *ref = TaoMax(*ref, ls->memory[ls->current]);
  }
  else if (ls->referencePolicy == REFERENCE_MEAN) {
    // Smaller of the maximum and the mean of the last reference value
    // and the current entry
    *ref = TaoMin(*ref, 0.5*(ls->lastReference + ls->memory[ls->current]));
  }
  TaoFunctionReturn(0);
}

static int TaoApply_PostArmijo(TAO_SOLVER tao, TAO_ARMIJO *ls,
                               double f, double step,
                               double ref, TaoInt idx, TaoInt *info2)
{
  int info;

  TaoFunctionBegin;

  *info2 = 0;

  // Check termination
  if (step < ls->minimumStep) {
    info = PetscInfo(tao, "TaoApply_Armijo:Step is at lower bound.\n"); CHKERRQ(info);
    *info2 = 1;
  }

  if (TaoInfOrNaN(f)) {
    info = PetscInfo(tao, "TaoApply_Armijo:Function is inf or nan.\n"); CHKERRQ(info);
    *info2 = 2;
  }

  if (*info2) {
    TaoFunctionReturn(0);
  }

  // Successful termination, update memory
  ls->lastReference = ref;
  if (ls->replacementPolicy == REPLACE_FIFO) {
    // FIFO: overwrite the oldest entry and advance the index cyclically
    ls->memory[ls->current++] = f;
    if (ls->current >= ls->memorySize) {
      ls->current = 0;
    }
  }
  else {
    // MRU: overwrite the entry that defined the reference value
    ls->current = idx;
    ls->memory[idx] = f;
  }
  TaoFunctionReturn(0);
}

/* @ TaoApply_Armijo - This routine performs a linesearch.  It
   backtracks until the (nonmonotone) Armijo conditions are satisfied.

   Input Parameters:
+  tao - TAO_SOLVER context
.  X - current iterate (on output X contains new iterate, X + step*S)
.  S - search direction
.  f - merit function evaluated at X
.  G - gradient of merit function evaluated at X
.  W - work vector
-  step - initial estimate of step length

   Output parameters:
+  f - merit function evaluated at new iterate, X + step*S
.  G - gradient of merit function evaluated at new iterate, X + step*S
.  X - new iterate
-  step - final step length

   Info is set to one of:
.  0 - the line search succeeds; the sufficient decrease
       condition and the directional derivative condition hold

   a negative number if an input parameter is invalid
-  -1 - step < 0

   a positive number if the line search otherwise terminates
+  1 - Step is at the lower bound, stepmin.
@ */
static int TaoApply_Armijo(TAO_SOLVER tao, TaoVec *X, TaoVec *G, TaoVec *S,
                           TaoVec *W, double *f, double *f_full, double *step,
                           TaoInt *info2, void *ctx)
{
  TAO_ARMIJO *ls = (TAO_ARMIJO *)ctx;

  const double beta = ls->beta;
  const double beta_inf = ls->beta_inf;

  double fact, ref, t, gdx;
  TaoInt idx;
  int info;
#if defined(PETSC_USE_COMPLEX)
  PetscScalar cgdx;  /* assumed scalar type; holds the complex inner product, real part used below */
#endif

  TaoFunctionBegin;
  info = TaoApply_PreArmijo(tao, ls, *f, *step, &ref, &idx, info2); CHKERRQ(info);

#if defined(PETSC_USE_COMPLEX)
  info = G->Dot(S, &cgdx); CHKERRQ(info); gdx = TaoReal(cgdx);
#else
  info = G->Dot(S, &gdx); CHKERRQ(info);
#endif

  if (TaoInfOrNaN(gdx)) {
    info = PetscInfo(tao, "TaoApply_Armijo:Line search error: gdx is inf or nan\n"); CHKERRQ(info);
    *info2 = -11;
  }

  if (gdx >= 0.0) {
    info = PetscInfo(tao, "TaoApply_Armijo:Search direction not a descent direction\n"); CHKERRQ(info);
    *info2 = -12;
  }

  if (*info2) {
    TaoFunctionReturn(0);
  }

  fact = ls->sigma * gdx;
  t = *step;
  tao->new_search = TAO_TRUE;
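
  /*
     Backtracking loop: accept the first step length t satisfying the
     (nonmonotone) Armijo sufficient decrease condition
         f(X + t*S) <= ref + sigma * t * <G, S>,
     where ref is the reference value computed by TaoApply_PreArmijo and
     fact = sigma * gdx is the scaled directional derivative.  Steps that
     produce inf/nan are shortened by beta_inf, rejected steps by beta.
  */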
  while (t >= ls->minimumStep) {
    // Calculate iterate
    info = W->Waxpby(1.0, X, t, S); CHKERRQ(info);

    // Calculate function at new iterate
    tao->current_step = t;
    info = TaoComputeMeritFunction(tao, W, f); CHKERRQ(info);
    tao->new_search = TAO_FALSE;
    if (*step == t) {
      *f_full = *f;
    }

    if (TaoInfOrNaN(*f)) {
      t *= beta_inf;
    }
    else {
      // Check descent condition
      if (*f <= ref + t*fact) {
        break;
      }

      t *= beta;
    }
  }

  info = TaoApply_PostArmijo(tao, ls, *f, t, ref, idx, info2); CHKERRQ(info);

  // Update iterate and compute gradient
  *step = t;
  info = X->CopyFrom(W); CHKERRQ(info);
  tao->current_step = t;
  info = TaoComputeMeritGradient(tao, X, G); CHKERRQ(info);

  // Finish computations
  info = PetscInfo1(tao, "TaoApply_Armijo:step = %10.4f\n", *step); CHKERRQ(info);
  TaoFunctionReturn(0);
}

/* @ TaoApply_NDArmijo - This routine performs a linesearch.  It
   backtracks until the (nonmonotone) Armijo conditions are satisfied.
   This is modified for a nondifferentiable function.

   Input Parameters:
+  tao - TAO_SOLVER context
.  X - current iterate (on output X contains new iterate, X + step*S)
.  S - search direction
.  f - merit function evaluated at X
-  step - initial estimate of step length

   Output parameters:
+  f - merit function evaluated at new iterate, X + step*S
.  X - new iterate
-  step - final step length

   Info is set to one of:
.  0 - the line search succeeds; the sufficient decrease
       condition and the directional derivative condition hold

   a negative number if an input parameter is invalid
-  -1 - step < 0

   a positive number if the line search otherwise terminates
+  1 - Step is at the lower bound, stepmin.
@ */
static int TaoApply_NDArmijo(TAO_SOLVER tao, TaoVec *X, TaoVec *G, TaoVec *S,
                             TaoVec *W, double *f, double *f_full, double *step,
                             TaoInt *info2, void *ctx)
{
  TAO_ARMIJO *ls = (TAO_ARMIJO *)ctx;

  const double fact = ls->sigma;
  const double beta = ls->beta;
  const double beta_inf = ls->beta_inf;

  double ref, t;
  TaoInt idx;
  int info;

  TaoFunctionBegin;

  info = TaoApply_PreArmijo(tao, ls, *f, *step, &ref, &idx, info2); CHKERRQ(info);
  if (*info2) {
    TaoFunctionReturn(0);
  }

  t = *step;
  tao->new_search = TAO_TRUE;
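
  /*
     Backtracking loop for the nondifferentiable case: no directional
     derivative <G, S> is used, so accept the first step length t with
         f(X + t*S) <= (1 - sigma*t) * ref,
     where ref is the reference value computed by TaoApply_PreArmijo.
     Steps that produce inf/nan are shortened by beta_inf, rejected
     steps by beta.
  */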
  while (t >= ls->minimumStep) {
    // Calculate iterate
    info = W->Waxpby(1.0, X, t, S); CHKERRQ(info);

    // Calculate function at new iterate
    tao->current_step = t;
    info = TaoComputeMeritFunction(tao, W, f); CHKERRQ(info);
    tao->new_search = TAO_FALSE;
    if (*step == t) {
      *f_full = *f;
    }

    if (TaoInfOrNaN(*f)) {
      t *= beta_inf;
    }
    else {
      // Check descent condition
      if (*f <= (1 - fact*t)*ref) {
        break;
      }

      t *= beta;
    }
  }

  info = TaoApply_PostArmijo(tao, ls, *f, t, ref, idx, info2); CHKERRQ(info);

  // Update iterate and compute gradient
  *step = t;
  info = X->CopyFrom(W); CHKERRQ(info);
  tao->current_step = t;
  info = TaoComputeMeritGradient(tao, X, G); CHKERRQ(info);

  // Finish computations
  info = PetscInfo1(tao, "TaoApply_NDArmijo:step = %10.4f\n", *step); CHKERRQ(info);
  TaoFunctionReturn(0);
}

/*@C
   TaoCreateArmijoLineSearch - Create a non-monotone linesearch

   Input Parameters:
.  tao - TAO_SOLVER context

   Note:
   This algorithm is taken from the following references --

   Armijo, "Minimization of Functions Having Lipschitz Continuous
   First-Partial Derivatives," Pacific Journal of Mathematics, volume 16,
   pages 1-3, 1966.

   Ferris and Lucidi, "Nonmonotone Stabilization Methods for Nonlinear
   Equations," Journal of Optimization Theory and Applications, volume 81,
   pages 53-71, 1994.

   Grippo, Lampariello, and Lucidi, "A Nonmonotone Line Search Technique
   for Newton's Method," SIAM Journal on Numerical Analysis, volume 23,
   pages 707-716, 1986.

   Grippo, Lampariello, and Lucidi, "A Class of Nonmonotone Stabilization
   Methods in Unconstrained Optimization," Numerische Mathematik, volume 59,
   pages 779-805, 1991.

   Note:
   This line search enforces non-monotone Armijo descent conditions for
   unconstrained optimization.  This routine is used within the following
   TAO solvers: infeasible semismooth with linesearch (tao_ssils).

   Level: developer

.keywords: TAO_SOLVER, linesearch
@*/
int TaoCreateArmijoLineSearch(TAO_SOLVER tao)
{
  TAO_ARMIJO *ls;
  int info;

  TaoFunctionBegin;

  info = TaoNew(TAO_ARMIJO, &ls); CHKERRQ(info);
  info = PetscLogObjectMemory(tao, sizeof(TAO_ARMIJO)); CHKERRQ(info);

  ls->memory = TAO_NULL;
  ls->alpha = 1.0;
  ls->beta = 0.5;
  ls->beta_inf = 0.5;
  ls->sigma = 1e-4;
  ls->minimumStep = TAO_EPSILON;
  ls->memorySize = 1;
  ls->referencePolicy = REFERENCE_MAX;
  ls->replacementPolicy = REPLACE_MRU;

  info = TaoSetLineSearch(tao, 0,
                          TaoSetOptions_Armijo,
                          TaoApply_Armijo,
                          TaoView_Armijo,
                          TaoDestroy_Armijo,
                          (void *) ls); CHKERRQ(info);

  TaoFunctionReturn(0);
}

/*@C
   TaoCreateNDArmijoLineSearch - Create a non-monotone linesearch for a
   nondifferentiable function

   Input Parameters:
.  tao - TAO_SOLVER context

   Note:
   This algorithm is taken from the following references --

   Armijo, "Minimization of Functions Having Lipschitz Continuous
   First-Partial Derivatives," Pacific Journal of Mathematics, volume 16,
   pages 1-3, 1966.

   Ferris and Lucidi, "Nonmonotone Stabilization Methods for Nonlinear
   Equations," Journal of Optimization Theory and Applications, volume 81,
   pages 53-71, 1994.

   Grippo, Lampariello, and Lucidi, "A Nonmonotone Line Search Technique
   for Newton's Method," SIAM Journal on Numerical Analysis, volume 23,
   pages 707-716, 1986.

   Grippo, Lampariello, and Lucidi, "A Class of Nonmonotone Stabilization
   Methods in Unconstrained Optimization," Numerische Mathematik, volume 59,
   pages 779-805, 1991.

   Note:
   This line search enforces non-monotone Armijo descent conditions for
   unconstrained optimization.  This routine is used within the following
   TAO solvers: infeasible semismooth with linesearch (tao_ssils).

   Level: developer

.keywords: TAO_SOLVER, linesearch
@*/
int TaoCreateNDArmijoLineSearch(TAO_SOLVER tao)
{
  TAO_ARMIJO *ls;
  int info;

  TaoFunctionBegin;

  info = TaoNew(TAO_ARMIJO, &ls); CHKERRQ(info);
  info = PetscLogObjectMemory(tao, sizeof(TAO_ARMIJO)); CHKERRQ(info);

  ls->memory = TAO_NULL;
  ls->alpha = 1.0;
  ls->beta = 0.5;
  ls->beta_inf = 0.5;
  ls->sigma = 1e-4;
  ls->minimumStep = TAO_EPSILON;
  ls->memorySize = 1;
  ls->referencePolicy = REFERENCE_MAX;
  ls->replacementPolicy = REPLACE_MRU;

  info = TaoSetLineSearch(tao, 0,
                          TaoSetOptions_Armijo,
                          TaoApply_NDArmijo,
                          TaoView_Armijo,
                          TaoDestroy_Armijo,
                          (void *) ls); CHKERRQ(info);

  TaoFunctionReturn(0);
}
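
/*
   Example usage (sketch): a TAO solver implementation would typically
   enable one of these line searches during its setup, e.g.

     info = TaoCreateArmijoLineSearch(tao); CHKERRQ(info);     // differentiable merit function
     // or
     info = TaoCreateNDArmijoLineSearch(tao); CHKERRQ(info);   // nondifferentiable merit function

   and later invoke it through the generic TAO line search interface.
   The exact call site depends on the particular solver (see tao_ssils).
*/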