Actual source code: projectedarmijo.c
#include "projectedarmijo.h"

#define REPLACE_FIFO 1
#define REPLACE_MRU  2

#define REFERENCE_MAX  1
#define REFERENCE_AVE  2
#define REFERENCE_MEAN 3
static int TaoDestroy_ProjectedArmijo(TAO_SOLVER tao, void *ctx)
{
  TAO_PROJECTEDARMIJO *ls = (TAO_PROJECTEDARMIJO *)ctx;
  int info;

  TaoFunctionBegin;
  if (ls->work != TAO_NULL) {
    info = TaoVecDestroy(ls->work); CHKERRQ(info);
    ls->work = TAO_NULL;
  }

  if (ls->memory != TAO_NULL) {
    info = TaoFree(ls->memory); CHKERRQ(info);
    ls->memory = TAO_NULL;
  }
  info = TaoFree(ls); CHKERRQ(info);
  TaoFunctionReturn(0);
}
static int TaoSetOptions_ProjectedArmijo(TAO_SOLVER tao, void *ctx)
{
  TAO_PROJECTEDARMIJO *ls = (TAO_PROJECTEDARMIJO *)ctx;
  int info;

  TaoFunctionBegin;
  info = TaoOptionsHead("Projected Armijo linesearch options"); CHKERRQ(info);
  info = TaoOptionDouble("-tao_projected_armijo_alpha", "initial reference constant", "", ls->alpha, &ls->alpha, 0); CHKERRQ(info);
  info = TaoOptionDouble("-tao_projected_armijo_beta", "decrease constant", "", ls->beta, &ls->beta, 0); CHKERRQ(info);
  info = TaoOptionDouble("-tao_projected_armijo_sigma", "acceptance constant", "", ls->sigma, &ls->sigma, 0); CHKERRQ(info);
  info = TaoOptionInt("-tao_projected_armijo_memory_size", "number of historical elements", "", ls->memorySize, &ls->memorySize, 0); CHKERRQ(info);
  info = TaoOptionDouble("-tao_projected_armijo_minimum_step", "minimum acceptable step", "", ls->minimumStep, &ls->minimumStep, 0); CHKERRQ(info);
  info = TaoOptionInt("-tao_projected_armijo_reference_policy", "policy for updating reference value", "", ls->referencePolicy, &ls->referencePolicy, 0); CHKERRQ(info);
  info = TaoOptionInt("-tao_projected_armijo_replacement_policy", "policy for updating memory", "", ls->replacementPolicy, &ls->replacementPolicy, 0); CHKERRQ(info);
  info = TaoOptionsTail(); CHKERRQ(info);
  TaoFunctionReturn(0);
}
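
/* Illustrative usage note (not from the original source): the options
   registered above can be set on the command line of a program that uses
   this line search, for example

       -tao_projected_armijo_sigma 1e-4
       -tao_projected_armijo_memory_size 10
       -tao_projected_armijo_reference_policy 1

   The integer reference policies 1, 2, 3 correspond to REFERENCE_MAX,
   REFERENCE_AVE, and REFERENCE_MEAN, and the replacement policies 1, 2
   correspond to REPLACE_FIFO and REPLACE_MRU (see the #defines at the top
   of this file). */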
static int TaoView_ProjectedArmijo(TAO_SOLVER tao, void *ctx)
{
  TAO_PROJECTEDARMIJO *ls = (TAO_PROJECTEDARMIJO *)ctx;
  int info;

  TaoFunctionBegin;
  info = TaoPrintDouble(tao, " Projected Armijo linesearch: alpha=%g", ls->alpha); CHKERRQ(info);
  info = TaoPrintDouble(tao, " beta=%g ", ls->beta); CHKERRQ(info);
  info = TaoPrintDouble(tao, "sigma=%g ", ls->sigma); CHKERRQ(info);
  info = TaoPrintDouble(tao, "minstep=%g,", ls->minimumStep); CHKERRQ(info);
  info = TaoPrintInt(tao, "memsize=%d\n", ls->memorySize); CHKERRQ(info);
  TaoFunctionReturn(0);
}
static int TaoApply_PreProjectedArmijo(TAO_SOLVER tao, TAO_PROJECTEDARMIJO *ls,
                                       double f, double step,
                                       double *ref, TaoInt *idx, TaoInt *info2)
{
  int info;
  TaoInt i;

  TaoFunctionBegin;

  *info2 = 0;

  // Check linesearch parameters
  if (step < 0) {
    info = PetscInfo1(tao, "TaoApply_ProjectedArmijo:Line search error: step (%g) < 0\n", step); CHKERRQ(info);
    *info2 = -1;
    TaoFunctionReturn(0);
  } else if (ls->alpha < 1) {
    info = PetscInfo1(tao, "TaoApply_ProjectedArmijo:Line search error: alpha (%g) < 1\n", ls->alpha); CHKERRQ(info);
    *info2 = -2;
    TaoFunctionReturn(0);
  } else if ((ls->beta <= 0) || (ls->beta >= 1)) {
    info = PetscInfo1(tao, "TaoApply_ProjectedArmijo:Line search error: beta (%g) invalid\n", ls->beta); CHKERRQ(info);
    *info2 = -3;
    TaoFunctionReturn(0);
  } else if ((ls->sigma <= 0) || (ls->sigma >= 0.5)) {
    info = PetscInfo1(tao, "TaoApply_ProjectedArmijo:Line search error: sigma (%g) invalid\n", ls->sigma); CHKERRQ(info);
    *info2 = -4;
    TaoFunctionReturn(0);
  } else if (ls->minimumStep <= 0) {
    info = PetscInfo1(tao, "TaoApply_ProjectedArmijo:Line search error: minimum_step (%g) <= 0\n", ls->minimumStep); CHKERRQ(info);
    *info2 = -5;
    TaoFunctionReturn(0);
  } else if (ls->memorySize < 1) {
    info = PetscInfo1(tao, "TaoApply_ProjectedArmijo:Line search error: memory_size (%d) < 1\n", ls->memorySize); CHKERRQ(info);
    *info2 = -6;
    TaoFunctionReturn(0);
  } else if ((ls->referencePolicy != REFERENCE_MAX) &&
             (ls->referencePolicy != REFERENCE_AVE) &&
             (ls->referencePolicy != REFERENCE_MEAN)) {
    info = PetscInfo(tao, "TaoApply_ProjectedArmijo:Line search error: reference_policy invalid\n"); CHKERRQ(info);
    *info2 = -7;
    TaoFunctionReturn(0);
  } else if ((ls->replacementPolicy != REPLACE_FIFO) &&
             (ls->replacementPolicy != REPLACE_MRU)) {
    info = PetscInfo(tao, "TaoApply_ProjectedArmijo:Line search error: replacement_policy invalid\n"); CHKERRQ(info);
    *info2 = -8;
    TaoFunctionReturn(0);
  }

  // Check to see if the memory has been allocated.  If not, allocate
  // the historical array and populate it with the initial function
  // values.
  if (ls->memory == TAO_NULL) {
    info = TaoMalloc(sizeof(double)*ls->memorySize, &ls->memory); CHKERRQ(info);
    info = PetscLogObjectMemory(tao, sizeof(double)*ls->memorySize); CHKERRQ(info);
  }

  if (tao->iter == 0) {
    for (i = 0; i < ls->memorySize; i++) {
      ls->memory[i] = ls->alpha*f;
    }

    ls->current = 0;
    ls->lastReference = ls->memory[0];
  }

  // Calculate the reference value: start with the MAX policy and adjust
  // below if the AVE or MEAN policy was selected.
  *ref = ls->memory[0];
  *idx = 0;

  for (i = 1; i < ls->memorySize; i++) {
    if (ls->memory[i] > *ref) {
      *ref = ls->memory[i];
      *idx = i;
    }
  }

  if (ls->referencePolicy == REFERENCE_AVE) {
    *ref = 0;
    for (i = 0; i < ls->memorySize; i++) {
      *ref += ls->memory[i];
    }
    *ref = *ref / ls->memorySize;
    *ref = TaoMax(*ref, ls->memory[ls->current]);
  } else if (ls->referencePolicy == REFERENCE_MEAN) {
    *ref = TaoMin(*ref, 0.5*(ls->lastReference + ls->memory[ls->current]));
  }

  TaoFunctionReturn(0);
}
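
/* Reading aid (a small worked example inferred from the code above and
   below, not part of the original source): with memory = {3.0, 5.0, 4.0},
   current = 2, and lastReference = 4.5, the three reference policies give

     REFERENCE_MAX:  ref = 5.0                          (largest stored value)
     REFERENCE_AVE:  ref = max((3+5+4)/3, memory[2]) = max(4.0, 4.0) = 4.0
     REFERENCE_MEAN: ref = min(5.0, 0.5*(4.5 + 4.0))  = 4.25

   After an accepted step, TaoApply_PostProjectedArmijo overwrites either the
   next entry in cyclic order (REPLACE_FIFO) or the entry that supplied the
   MAX reference (REPLACE_MRU) with the new function value. */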
static int TaoApply_PostProjectedArmijo(TAO_SOLVER tao, TAO_PROJECTEDARMIJO *ls,
                                        double f, double step,
                                        double ref, TaoInt idx, TaoInt *info2)
{
  int info;

  TaoFunctionBegin;

  *info2 = 0;

  // Check termination
  if (step < ls->minimumStep) {
    info = PetscInfo(tao, "TaoApply_ProjectedArmijo:Step is at lower bound.\n"); CHKERRQ(info);
    *info2 = 1;
    TaoFunctionReturn(0);
  }

  // Successful termination, update memory
  ls->lastReference = ref;
  if (ls->replacementPolicy == REPLACE_FIFO) {
    ls->memory[ls->current++] = f;
    if (ls->current >= ls->memorySize) {
      ls->current = 0;
    }
  } else {
    ls->current = idx;
    ls->memory[idx] = f;
  }
  TaoFunctionReturn(0);
}
/* @ TaoApply_ProjectedArmijo - This routine performs a linesearch.  It
   backtracks until the (nonmonotone) Projected Armijo conditions are satisfied.

   Input Parameters:
+  tao - TAO_SOLVER context
.  X - current iterate (on output X contains new iterate, X + step*S)
.  S - search direction
.  f - merit function evaluated at X
.  G - gradient of merit function evaluated at X
.  W - work vector
-  step - initial estimate of step length

   Output parameters:
+  f - merit function evaluated at new iterate, X + step*S
.  G - gradient of merit function evaluated at new iterate, X + step*S
.  X - new iterate
-  step - final step length

   Info is set to one of:
.  0 - the line search succeeds; the sufficient decrease
   condition and the directional derivative condition hold

   negative number if an input parameter is invalid
-  -1 - step < 0

   positive number if the line search otherwise terminates
+  1 - Step is at the lower bound, stepmin.
@ */
static int TaoApply_ProjectedArmijo(TAO_SOLVER tao, TaoVec *X, TaoVec *G,
                                    TaoVec *S, TaoVec *W,
                                    double *f, double *f_full, double *step,
                                    TaoInt *info2, void *ctx)
{
  TAO_PROJECTEDARMIJO *ls = (TAO_PROJECTEDARMIJO *)ctx;
  TaoVec *L, *U, *work;
  double ref, innerd, t;
  TaoInt idx;
  int info;
  TaoTruth flag;

  TaoFunctionBegin;

  info = TaoApply_PreProjectedArmijo(tao, ls, *f, *step, &ref, &idx, info2); CHKERRQ(info);
  if (*info2) {
    TaoFunctionReturn(0);
  }

  // Reallocate the work vector if it is no longer compatible with X
  if (ls->work != TAO_NULL) {
    info = X->Compatible(ls->work, &flag); CHKERRQ(info);
    if (flag == TAO_FALSE) {
      info = TaoVecDestroy(ls->work); CHKERRQ(info);
      ls->work = TAO_NULL;
    }
  }

  if (ls->work == TAO_NULL) {
    info = G->Clone(&(ls->work)); CHKERRQ(info);
  }

  info = TaoGetVariableBounds(tao, &L, &U); CHKERRQ(info);
  work = ls->work;

  const double sigma = ls->sigma;
  const double beta = ls->beta;

  t = *step;
  tao->new_search = TAO_TRUE;
  while (t >= ls->minimumStep) {
    // Calculate the projected trial iterate
    info = W->Waxpby(1.0, X, t, S); CHKERRQ(info);
    info = W->PointwiseMaximum(W, L); CHKERRQ(info);
    info = W->PointwiseMinimum(W, U); CHKERRQ(info);

    info = work->Waxpby(1.0, X, -1.0, W); CHKERRQ(info);
    info = work->Dot(G, &innerd); CHKERRQ(info);

    if (innerd > 0) {
      // Calculate function at new iterate
      tao->current_step = t;
      info = TaoComputeMeritFunction(tao, W, f); CHKERRQ(info);
      tao->new_search = TAO_FALSE;
      if (*step == t) {
        *f_full = *f;
      }

      // Check descent condition
      if (*f <= ref - sigma*innerd) {
        break;
      }
    }
    else if (*step == t) {
      tao->current_step = t;
      info = TaoComputeMeritFunction(tao, W, f_full); CHKERRQ(info);
      tao->new_search = TAO_FALSE;
    }

    t *= beta;
  }

  info = TaoApply_PostProjectedArmijo(tao, ls, *f, t, ref, idx, info2); CHKERRQ(info);

  // Update iterate and compute gradient
  *step = t;
  info = X->CopyFrom(W); CHKERRQ(info);
  tao->current_step = t;
  info = TaoComputeMeritGradient(tao, X, G); CHKERRQ(info);

  // Finish computations
  info = PetscInfo1(tao, "TaoApply_ProjectedArmijo:step = %10.4f\n", *step); CHKERRQ(info);
  TaoFunctionReturn(0);
}
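
/* A minimal standalone sketch (not part of TAO, and excluded from the build
   by the #if 0 guard) of the backtracking loop that TaoApply_ProjectedArmijo
   performs, reduced to a single variable.  With the default memorySize = 1
   and alpha = 1, the nonmonotone reference value reduces to the last accepted
   function value, which is what this sketch uses.  The quadratic
   f(x) = 0.5*(x - 3)^2, the bounds [0, 2], and all names below are
   illustrative assumptions. */
#if 0
#include <stdio.h>

static double clamp(double x, double lo, double hi)
{
  return (x < lo) ? lo : ((x > hi) ? hi : x);
}

int main(void)
{
  const double lo = 0.0, hi = 2.0;      /* bound constraints L, U        */
  const double sigma = 1e-4;            /* acceptance constant           */
  const double beta = 0.5;              /* backtracking factor           */
  const double minstep = 1e-12;         /* minimum acceptable step       */
  double x = 0.5;                       /* current iterate X             */
  double f = 0.5*(x - 3.0)*(x - 3.0);   /* merit function value at X     */
  double g = x - 3.0;                   /* gradient at X                 */
  double s = -g;                        /* descent direction S           */
  double t = 1.0;                       /* trial step                    */

  while (t >= minstep) {
    double w = clamp(x + t*s, lo, hi);  /* projected trial point W       */
    double fw = 0.5*(w - 3.0)*(w - 3.0);
    double innerd = g*(x - w);          /* <G, X - W>                    */
    if (innerd > 0 && fw <= f - sigma*innerd) {  /* projected Armijo test */
      x = w;
      f = fw;
      break;
    }
    t *= beta;                          /* backtrack                     */
  }
  printf("accepted step %g, iterate %g, f = %g\n", t, x, f);
  return 0;
}
#endif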
/* @ TaoApply_NDProjectedArmijo - This routine performs a linesearch.  It
   backtracks until the (nonmonotone) Projected Armijo conditions are
   satisfied.  This is a modified version for a nondifferentiable function.

   Input Parameters:
+  tao - TAO_SOLVER context
.  X - current iterate (on output X contains new iterate, X + step*S)
.  S - search direction
.  f - merit function evaluated at X
-  step - initial estimate of step length

   Output parameters:
+  f - merit function evaluated at new iterate, X + step*S
.  X - new iterate
-  step - final step length

   Info is set to one of:
.  0 - the line search succeeds; the sufficient decrease
   condition and the directional derivative condition hold

   negative number if an input parameter is invalid
-  -1 - step < 0

   positive number if the line search otherwise terminates
+  1 - Step is at the lower bound, stepmin.
@ */
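
/* Reading aid (inferred from the implementation below, not part of the
   original documentation): the test below contains no directional-derivative
   term.  With W = P[X + t*S] the projection of the trial point onto the
   bounds [L, U], the step t is accepted when

       f(W) <= (1 - sigma*t) * ref,

   where ref is the nonmonotone reference value computed by
   TaoApply_PreProjectedArmijo. */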
static int TaoApply_NDProjectedArmijo(TAO_SOLVER tao, TaoVec *X, TaoVec *G,
                                      TaoVec *S, TaoVec *W,
                                      double *f, double *f_full, double *step,
                                      TaoInt *info2, void *ctx)
{
  TAO_PROJECTEDARMIJO *ls = (TAO_PROJECTEDARMIJO *)ctx;
  TaoVec *L, *U;
  double ref, t;
  int info;
  TaoInt idx;

  TaoFunctionBegin;

  info = TaoApply_PreProjectedArmijo(tao, ls, *f, *step, &ref, &idx, info2); CHKERRQ(info);
  if (*info2) {
    TaoFunctionReturn(0);
  }

  info = TaoGetVariableBounds(tao, &L, &U); CHKERRQ(info);

  const double sigma = ls->sigma;
  const double beta = ls->beta;

  t = *step;
  tao->new_search = TAO_TRUE;
  while (t >= ls->minimumStep) {
    // Calculate the projected trial iterate
    info = W->Waxpby(1.0, X, t, S); CHKERRQ(info);
    info = W->PointwiseMaximum(W, L); CHKERRQ(info);
    info = W->PointwiseMinimum(W, U); CHKERRQ(info);

    // Calculate function at new iterate
    tao->current_step = t;
    info = TaoComputeMeritFunction(tao, W, f); CHKERRQ(info);
    tao->new_search = TAO_FALSE;
    if (*step == t) {
      *f_full = *f;
    }

    // Check descent condition
    if (*f <= (1 - sigma*t)*ref) {
      break;
    }

    t *= beta;
  }

  info = TaoApply_PostProjectedArmijo(tao, ls, *f, t, ref, idx, info2); CHKERRQ(info);

  // Update iterate and compute gradient
  *step = t;
  info = X->CopyFrom(W); CHKERRQ(info);
  tao->current_step = t;
  info = TaoComputeMeritGradient(tao, X, G); CHKERRQ(info);

  // Finish computations
  info = PetscInfo1(tao, "TaoApply_NDProjectedArmijo:step = %10.4f\n", *step); CHKERRQ(info);
  TaoFunctionReturn(0);
}
/*@
   TaoCreateProjectedArmijoLineSearch - Create a nonmonotone projected linesearch.

   Input Parameters:
.  tao - TAO_SOLVER context

   Note:
   This algorithm is taken from the following references --

   Armijo, "Minimization of Functions Having Lipschitz Continuous
   First-Partial Derivatives," Pacific Journal of Mathematics, volume 16,
   pages 1-3, 1966.

   Ferris and Lucidi, "Nonmonotone Stabilization Methods for Nonlinear
   Equations," Journal of Optimization Theory and Applications, volume 81,
   pages 53-71, 1994.

   Grippo, Lampariello, and Lucidi, "A Nonmonotone Line Search Technique
   for Newton's Method," SIAM Journal on Numerical Analysis, volume 23,
   pages 707-716, 1986.

   Grippo, Lampariello, and Lucidi, "A Class of Nonmonotone Stabilization
   Methods in Unconstrained Optimization," Numerische Mathematik, volume 59,
   pages 779-805, 1991.

   Note:
   This line search enforces nonmonotone Armijo descent conditions for
   bound-constrained optimization.  This routine is used within the
   following TAO solvers: feasible semismooth with linesearch (tao_ssfls).

   Level: developer

.keywords: TAO_SOLVER, linesearch
@*/
int TaoCreateProjectedArmijoLineSearch(TAO_SOLVER tao)
{
  TAO_PROJECTEDARMIJO *ls;
  int info;

  TaoFunctionBegin;

  info = TaoNew(TAO_PROJECTEDARMIJO, &ls); CHKERRQ(info);
  info = PetscLogObjectMemory(tao, sizeof(TAO_PROJECTEDARMIJO)); CHKERRQ(info);

  ls->work = TAO_NULL;
  ls->memory = TAO_NULL;
  ls->alpha = 1.0;
  ls->beta = 0.5;
  ls->sigma = 1e-4;
  ls->minimumStep = TAO_EPSILON;
  ls->memorySize = 1;
  ls->referencePolicy = REFERENCE_MAX;
  ls->replacementPolicy = REPLACE_MRU;

  info = TaoSetLineSearch(tao, 0,
                          TaoSetOptions_ProjectedArmijo,
                          TaoApply_ProjectedArmijo,
                          TaoView_ProjectedArmijo,
                          TaoDestroy_ProjectedArmijo,
                          (void *) ls); CHKERRQ(info);

  TaoFunctionReturn(0);
}
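
/* Usage sketch (an illustrative assumption, not taken from a specific TAO
   solver): a solver that wants this line search creates it once during its
   own setup, after which the function pointers registered above with
   TaoSetLineSearch() dispatch to TaoApply_ProjectedArmijo:

       info = TaoCreateProjectedArmijoLineSearch(tao); CHKERRQ(info);

   The nondifferentiable variant is attached the same way with
   TaoCreateNDProjectedArmijoLineSearch(). */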
/*@
   TaoCreateNDProjectedArmijoLineSearch - Create a nonmonotone projected linesearch
   for a nondifferentiable function.

   Input Parameters:
.  tao - TAO_SOLVER context

   Note:
   This algorithm is taken from the following references --

   Armijo, "Minimization of Functions Having Lipschitz Continuous
   First-Partial Derivatives," Pacific Journal of Mathematics, volume 16,
   pages 1-3, 1966.

   Ferris and Lucidi, "Nonmonotone Stabilization Methods for Nonlinear
   Equations," Journal of Optimization Theory and Applications, volume 81,
   pages 53-71, 1994.

   Grippo, Lampariello, and Lucidi, "A Nonmonotone Line Search Technique
   for Newton's Method," SIAM Journal on Numerical Analysis, volume 23,
   pages 707-716, 1986.

   Grippo, Lampariello, and Lucidi, "A Class of Nonmonotone Stabilization
   Methods in Unconstrained Optimization," Numerische Mathematik, volume 59,
   pages 779-805, 1991.

   Note:
   This line search enforces nonmonotone Armijo descent conditions for
   bound-constrained optimization.  This routine is used within the
   following TAO solvers: feasible semismooth with linesearch (tao_ssfls).

   Level: developer

.keywords: TAO_SOLVER, linesearch
@*/
int TaoCreateNDProjectedArmijoLineSearch(TAO_SOLVER tao)
{
  TAO_PROJECTEDARMIJO *ls;
  int info;

  TaoFunctionBegin;

  info = TaoNew(TAO_PROJECTEDARMIJO, &ls); CHKERRQ(info);
  info = PetscLogObjectMemory(tao, sizeof(TAO_PROJECTEDARMIJO)); CHKERRQ(info);

  ls->work = TAO_NULL;
  ls->memory = TAO_NULL;
  ls->alpha = 1.0;
  ls->beta = 0.5;
  ls->sigma = 1e-4;
  ls->minimumStep = TAO_EPSILON;
  ls->memorySize = 1;
  ls->referencePolicy = REFERENCE_MAX;
  ls->replacementPolicy = REPLACE_MRU;

  info = TaoSetLineSearch(tao, 0,
                          TaoSetOptions_ProjectedArmijo,
                          TaoApply_NDProjectedArmijo,
                          TaoView_ProjectedArmijo,
                          TaoDestroy_ProjectedArmijo,
                          (void *) ls); CHKERRQ(info);

  TaoFunctionReturn(0);
}