Actual source code: projectedarmijo.c
#include "projectedarmijo.h"

#define REPLACE_FIFO 1
#define REPLACE_MRU  2

#define REFERENCE_MAX  1
#define REFERENCE_AVE  2
#define REFERENCE_MEAN 3
static int TaoDestroy_ProjectedArmijo(TAO_SOLVER tao, void *ctx)
{
  TAO_PROJECTEDARMIJO *ls = (TAO_PROJECTEDARMIJO *)ctx;
  int info;

  TaoFunctionBegin;
  if (ls->work != TAO_NULL) {
    info = TaoVecDestroy(ls->work); CHKERRQ(info);
    ls->work = TAO_NULL;
  }

  if (ls->memory != TAO_NULL) {
    info = TaoFree(ls->memory); CHKERRQ(info);
    ls->memory = TAO_NULL;
  }
  info = TaoFree(ls); CHKERRQ(info);
  TaoFunctionReturn(0);
}
static int TaoSetOptions_ProjectedArmijo(TAO_SOLVER tao, void *ctx)
{
  TAO_PROJECTEDARMIJO *ls = (TAO_PROJECTEDARMIJO *)ctx;
  int info;

  TaoFunctionBegin;
  info = TaoOptionsHead("Projected Armijo linesearch options"); CHKERRQ(info);
  info = TaoOptionDouble("-tao_projected_armijo_alpha", "initial reference constant", "", ls->alpha, &ls->alpha, 0); CHKERRQ(info);
  info = TaoOptionDouble("-tao_projected_armijo_beta", "decrease constant", "", ls->beta, &ls->beta, 0); CHKERRQ(info);
  info = TaoOptionDouble("-tao_projected_armijo_sigma", "acceptance constant", "", ls->sigma, &ls->sigma, 0); CHKERRQ(info);
  info = TaoOptionInt("-tao_projected_armijo_memory_size", "number of historical elements", "", ls->memorySize, &ls->memorySize, 0); CHKERRQ(info);
  info = TaoOptionDouble("-tao_projected_armijo_minimum_step", "minimum acceptable step", "", ls->minimumStep, &ls->minimumStep, 0); CHKERRQ(info);
  info = TaoOptionInt("-tao_projected_armijo_reference_policy", "policy for updating reference value", "", ls->referencePolicy, &ls->referencePolicy, 0); CHKERRQ(info);
  info = TaoOptionInt("-tao_projected_armijo_replacement_policy", "policy for updating memory", "", ls->replacementPolicy, &ls->replacementPolicy, 0); CHKERRQ(info);
  info = TaoOptionsTail(); CHKERRQ(info);
  TaoFunctionReturn(0);
}
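
/*
  Illustrative usage sketch: assuming these options are read from the standard
  TAO/PETSc options database, the line search parameters registered above can
  be adjusted at run time, e.g. (the program name is a hypothetical placeholder):

      ./program -tao_projected_armijo_beta 0.5 \
                -tao_projected_armijo_memory_size 10 \
                -tao_projected_armijo_reference_policy 1

  where the reference policy value 1 corresponds to REFERENCE_MAX.
*/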
static int TaoView_ProjectedArmijo(TAO_SOLVER tao, void *ctx)
{
  TAO_PROJECTEDARMIJO *ls = (TAO_PROJECTEDARMIJO *)ctx;
  int info;

  TaoFunctionBegin;
  info = TaoPrintDouble(tao, "  Projected Armijo linesearch: alpha=%g", ls->alpha); CHKERRQ(info);
  info = TaoPrintDouble(tao, " beta=%g ", ls->beta); CHKERRQ(info);
  info = TaoPrintDouble(tao, "sigma=%g ", ls->sigma); CHKERRQ(info);
  info = TaoPrintDouble(tao, "minstep=%g,", ls->minimumStep); CHKERRQ(info);
  info = TaoPrintInt(tao, "memsize=%d\n", ls->memorySize); CHKERRQ(info);
  TaoFunctionReturn(0);
}
static int TaoApply_PreProjectedArmijo(TAO_SOLVER tao, TAO_PROJECTEDARMIJO *ls,
                                       double f, double step,
                                       double *ref, int *idx, int *info2)
{
  int info, i;

  TaoFunctionBegin;

  *info2 = 0;

  // Check linesearch parameters
  if (step < 0) {
    info = PetscInfo1(tao, "TaoApply_ProjectedArmijo:Line search error: step (%g) < 0\n", step); CHKERRQ(info);
    *info2 = -1;
    TaoFunctionReturn(0);
  } else if (ls->alpha < 1) {
    info = PetscInfo1(tao, "TaoApply_ProjectedArmijo:Line search error: alpha (%g) < 1\n", ls->alpha); CHKERRQ(info);
    *info2 = -2;
    TaoFunctionReturn(0);
  } else if ((ls->beta <= 0) || (ls->beta >= 1)) {
    info = PetscInfo1(tao, "TaoApply_ProjectedArmijo:Line search error: beta (%g) invalid\n", ls->beta); CHKERRQ(info);
    *info2 = -3;
    TaoFunctionReturn(0);
  } else if ((ls->sigma <= 0) || (ls->sigma >= 0.5)) {
    info = PetscInfo1(tao, "TaoApply_ProjectedArmijo:Line search error: sigma (%g) invalid\n", ls->sigma); CHKERRQ(info);
    *info2 = -4;
    TaoFunctionReturn(0);
  } else if (ls->minimumStep <= 0) {
    info = PetscInfo1(tao, "TaoApply_ProjectedArmijo:Line search error: minimum_step (%g) <= 0\n", ls->minimumStep); CHKERRQ(info);
    *info2 = -5;
    TaoFunctionReturn(0);
  } else if (ls->memorySize < 1) {
    info = PetscInfo1(tao, "TaoApply_ProjectedArmijo:Line search error: memory_size (%d) < 1\n", ls->memorySize); CHKERRQ(info);
    *info2 = -6;
    TaoFunctionReturn(0);
  } else if ((ls->referencePolicy != REFERENCE_MAX) &&
             (ls->referencePolicy != REFERENCE_AVE) &&
             (ls->referencePolicy != REFERENCE_MEAN)) {
    info = PetscInfo(tao, "TaoApply_ProjectedArmijo:Line search error: reference_policy invalid\n"); CHKERRQ(info);
    *info2 = -7;
    TaoFunctionReturn(0);
  } else if ((ls->replacementPolicy != REPLACE_FIFO) &&
             (ls->replacementPolicy != REPLACE_MRU)) {
    info = PetscInfo(tao, "TaoApply_ProjectedArmijo:Line search error: replacement_policy invalid\n"); CHKERRQ(info);
    *info2 = -8;
    TaoFunctionReturn(0);
  }
  // Check to see if the memory has been allocated.  If not, allocate
  // the historical array and populate it with the initial function
  // values.
  if (ls->memory == TAO_NULL) {
    info = TaoMalloc(sizeof(double)*ls->memorySize, &ls->memory); CHKERRQ(info);
    info = PetscLogObjectMemory(tao, sizeof(double)*ls->memorySize); CHKERRQ(info);
  }

  if (tao->iter == 0) {
    for (i = 0; i < ls->memorySize; i++) {
      ls->memory[i] = ls->alpha*(f);
    }

    ls->current = 0;
    ls->lastReference = ls->memory[0];
  }
  // Calculate reference value (MAX)
  *ref = ls->memory[0];
  *idx = 0;

  for (i = 1; i < ls->memorySize; i++) {
    if (ls->memory[i] > *ref) {
      *ref = ls->memory[i];
      *idx = i;
    }
  }

  if (ls->referencePolicy == REFERENCE_AVE) {
    *ref = 0;
    for (i = 0; i < ls->memorySize; i++) {
      *ref += ls->memory[i];
    }
    *ref = *ref / ls->memorySize;
    *ref = TaoMax(*ref, ls->memory[ls->current]);
  } else if (ls->referencePolicy == REFERENCE_MEAN) {
    *ref = TaoMin(*ref, 0.5*(ls->lastReference + ls->memory[ls->current]));
  }

  TaoFunctionReturn(0);
}
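
/*
  Worked example of the reference policies computed above (illustrative numbers
  only): with memorySize = 3, memory = {4.0, 9.0, 6.0}, current = 2, and
  lastReference = 7.0, the reference value becomes

    REFERENCE_MAX:   ref = max{4.0, 9.0, 6.0}                     = 9.0
    REFERENCE_AVE:   ref = max{(4.0+9.0+6.0)/3, memory[current]}  = 19.0/3
    REFERENCE_MEAN:  ref = min{9.0, 0.5*(7.0 + 6.0)}              = 6.5
*/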
static int TaoApply_PostProjectedArmijo(TAO_SOLVER tao, TAO_PROJECTEDARMIJO *ls,
                                        double f, double step,
                                        double ref, int idx, int *info2)
{
  int info;

  TaoFunctionBegin;

  *info2 = 0;

  // Check termination
  if (step < ls->minimumStep) {
    info = PetscInfo(tao, "TaoApply_ProjectedArmijo:Step is at lower bound.\n"); CHKERRQ(info);
    *info2 = 1;
    TaoFunctionReturn(0);
  }

  // Successful termination, update memory
  ls->lastReference = ref;
  if (ls->replacementPolicy == REPLACE_FIFO) {
    ls->memory[ls->current++] = f;
    if (ls->current >= ls->memorySize) {
      ls->current = 0;
    }
  } else {
    ls->current = idx;
    ls->memory[idx] = f;
  }
  TaoFunctionReturn(0);
}
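
/*
  Illustrative sketch of the two replacement policies above (hypothetical
  values): with memory = {4.0, 9.0, 6.0}, current = 0, idx = 1 (the index of
  the largest stored value), and an accepted function value f = 3.0,

    REPLACE_FIFO: memory becomes {3.0, 9.0, 6.0} and current advances to 1;
    REPLACE_MRU:  memory becomes {4.0, 3.0, 6.0} and current is set to idx = 1,
                  i.e. the largest stored value is the one overwritten.
*/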
/* @ TaoApply_ProjectedArmijo - This routine performs a linesearch. It
   backtracks until the (nonmonotone) Projected Armijo conditions are satisfied.

   Input Parameters:
+  tao - TAO_SOLVER context
.  X - current iterate (on output X contains new iterate, X + step*S)
.  S - search direction
.  f - merit function evaluated at X
.  G - gradient of merit function evaluated at X
.  W - work vector
-  step - initial estimate of step length

   Output parameters:
+  f - merit function evaluated at new iterate, X + step*S
.  G - gradient of merit function evaluated at new iterate, X + step*S
.  X - new iterate
-  step - final step length

   Info is set to one of:
.  0 - the line search succeeds; the sufficient decrease
       condition and the directional derivative condition hold

   negative number if an input parameter is invalid
-  -1 - step < 0

   positive number (>= 1) if the line search otherwise terminates
+  1 - Step is at the lower bound, stepmin.
@ */
static int TaoApply_ProjectedArmijo(TAO_SOLVER tao, TaoVec *X, TaoVec *G,
                                    TaoVec *S, TaoVec *W,
                                    double *f, double *f_full, double *step,
                                    int *info2, void *ctx)
{
  TAO_PROJECTEDARMIJO *ls = (TAO_PROJECTEDARMIJO *)ctx;
  TaoVec *L, *U, *work;
  double ref, innerd, t;
  int idx, info;
  TaoTruth flag;

  TaoFunctionBegin;
  info = TaoApply_PreProjectedArmijo(tao, ls, *f, *step, &ref, &idx, info2); CHKERRQ(info);
  if (*info2) {
    TaoFunctionReturn(0);
  }
  if (ls->work != TAO_NULL) {
    info = X->Compatible(ls->work, &flag); CHKERRQ(info);
    if (flag == TAO_FALSE) {
      info = TaoVecDestroy(ls->work); CHKERRQ(info);
      ls->work = TAO_NULL;
    }
  }
  if (ls->work == TAO_NULL) {
    info = G->Clone(&ls->work); CHKERRQ(info);
  }

  info = TaoGetVariableBounds(tao, &L, &U); CHKERRQ(info);
  work = ls->work;
  const double sigma = ls->sigma;
  const double beta = ls->beta;

  t = *step;
  while (t >= ls->minimumStep) {
    // Calculate iterate
    info = W->Waxpby(1.0, X, t, S); CHKERRQ(info);
    info = W->PointwiseMaximum(W, L); CHKERRQ(info);
    info = W->PointwiseMinimum(W, U); CHKERRQ(info);

    info = work->Waxpby(1.0, X, -1.0, W); CHKERRQ(info);
    info = work->Dot(G, &innerd); CHKERRQ(info);

    if (innerd > 0) {
      // Calculate function at new iterate
      info = TaoComputeMeritFunction(tao, W, f); CHKERRQ(info);
      if (*step == t) {
        *f_full = *f;
      }

      // Check descent condition
      if (*f <= ref - sigma*innerd) {
        break;
      }
    }
    else if (*step == t) {
      info = TaoComputeMeritFunction(tao, W, f_full); CHKERRQ(info);
    }

    t *= beta;
  }
  info = TaoApply_PostProjectedArmijo(tao, ls, *f, t, ref, idx, info2); CHKERRQ(info);
  // Update iterate and compute gradient
  *step = t;
  info = X->CopyFrom(W); CHKERRQ(info);
  info = TaoComputeMeritGradient(tao, X, G); CHKERRQ(info);

  // Finish computations
  info = PetscInfo1(tao, "TaoApply_ProjectedArmijo:step = %10.4f\n", *step); CHKERRQ(info);
  TaoFunctionReturn(0);
}
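
/*
  Summary of the acceptance test in the loop above: with the projected trial
  point W = median(L, X + t*S, U) (formed by the PointwiseMaximum and
  PointwiseMinimum calls), the search computes d = X - W and innerd = <d, G>,
  and accepts the step when

    innerd > 0   and   f(W) <= ref - sigma * innerd,

  where ref is the (possibly nonmonotone) reference value produced by
  TaoApply_PreProjectedArmijo.  Otherwise t is reduced by the factor beta.
*/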
/* @ TaoApply_NDProjectedArmijo - This routine performs a linesearch. It
   backtracks until the (nonmonotone) Projected Armijo conditions are
   satisfied.  This is a modified version for a nondifferentiable function.

   Input Parameters:
+  tao - TAO_SOLVER context
.  X - current iterate (on output X contains new iterate, X + step*S)
.  S - search direction
.  f - merit function evaluated at X
.  W - work vector
-  step - initial estimate of step length

   Output parameters:
+  f - merit function evaluated at new iterate, X + step*S
.  G - gradient of merit function evaluated at new iterate, X + step*S
.  X - new iterate
-  step - final step length

   Info is set to one of:
.  0 - the line search succeeds; the sufficient decrease
       condition and the directional derivative condition hold

   negative number if an input parameter is invalid
-  -1 - step < 0

   positive number (>= 1) if the line search otherwise terminates
+  1 - Step is at the lower bound, stepmin.
@ */
static int TaoApply_NDProjectedArmijo(TAO_SOLVER tao, TaoVec *X, TaoVec *G,
                                      TaoVec *S, TaoVec *W,
                                      double *f, double *f_full, double *step,
                                      int *info2, void *ctx)
{
  TAO_PROJECTEDARMIJO *ls = (TAO_PROJECTEDARMIJO *)ctx;
  TaoVec *L, *U;
  double ref, t;
  int idx, info;

  TaoFunctionBegin;
  info = TaoApply_PreProjectedArmijo(tao, ls, *f, *step, &ref, &idx, info2); CHKERRQ(info);
  if (*info2) {
    TaoFunctionReturn(0);
  }

  info = TaoGetVariableBounds(tao, &L, &U); CHKERRQ(info);
  const double sigma = ls->sigma;
  const double beta = ls->beta;

  t = *step;
  while (t >= ls->minimumStep) {
    // Calculate iterate
    info = W->Waxpby(1.0, X, t, S); CHKERRQ(info);
    info = W->PointwiseMaximum(W, L); CHKERRQ(info);
    info = W->PointwiseMinimum(W, U); CHKERRQ(info);

    // Calculate function at new iterate
    info = TaoComputeMeritFunction(tao, W, f); CHKERRQ(info);
    if (*step == t) {
      *f_full = *f;
    }

    // Check descent condition
    if (*f <= (1 - sigma*t)*ref) {
      break;
    }

    t *= beta;
  }
  info = TaoApply_PostProjectedArmijo(tao, ls, *f, t, ref, idx, info2); CHKERRQ(info);
  // Update iterate and compute gradient
  *step = t;
  info = X->CopyFrom(W); CHKERRQ(info);
  info = TaoComputeMeritGradient(tao, X, G); CHKERRQ(info);

  // Finish computations
  info = PetscInfo1(tao, "TaoApply_NDProjectedArmijo:step = %10.4f\n", *step); CHKERRQ(info);
  TaoFunctionReturn(0);
}
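
/*
  Note on the acceptance test above: unlike TaoApply_ProjectedArmijo, this
  variant forms no gradient inner product; the projected trial point
  W = median(L, X + t*S, U) is accepted as soon as

    f(W) <= (1 - sigma*t) * ref,

  which requires only merit-function values and is therefore suitable for the
  nondifferentiable setting described in the routine's documentation.
*/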
/*@
   TaoCreateProjectedArmijoLineSearch - Create a non-monotone projected linesearch

   Input Parameters:
.  tao - TAO_SOLVER context

   Note:
   This algorithm is taken from the following references --

   Armijo, "Minimization of Functions Having Lipschitz Continuous
   First-Partial Derivatives," Pacific Journal of Mathematics, volume 16,
   pages 1-3, 1966.

   Ferris and Lucidi, "Nonmonotone Stabilization Methods for Nonlinear
   Equations," Journal of Optimization Theory and Applications, volume 81,
   pages 53-71, 1994.

   Grippo, Lampariello, and Lucidi, "A Nonmonotone Line Search Technique
   for Newton's Method," SIAM Journal on Numerical Analysis, volume 23,
   pages 707-716, 1986.

   Grippo, Lampariello, and Lucidi, "A Class of Nonmonotone Stabilization
   Methods in Unconstrained Optimization," Numerische Mathematik, volume 59,
   pages 779-805, 1991.

   Note:
   This line search enforces non-monotone Armijo descent conditions for
   bound-constrained optimization.  This routine is used within the
   following TAO solvers: feasible semismooth with linesearch (tao_ssfls).

   Level: developer

.keywords: TAO_SOLVER, linesearch
@*/
int TaoCreateProjectedArmijoLineSearch(TAO_SOLVER tao)
{
  TAO_PROJECTEDARMIJO *ls;
  int info;

  TaoFunctionBegin;

  info = TaoNew(TAO_PROJECTEDARMIJO, &ls); CHKERRQ(info);
  info = PetscLogObjectMemory(tao, sizeof(TAO_PROJECTEDARMIJO)); CHKERRQ(info);

  ls->work = TAO_NULL;
  ls->memory = TAO_NULL;
  ls->alpha = 1.0;
  ls->beta = 0.5;
  ls->sigma = 1e-4;
  ls->minimumStep = TAO_EPSILON;
  ls->memorySize = 1;
  ls->referencePolicy = REFERENCE_MAX;
  ls->replacementPolicy = REPLACE_MRU;

  info = TaoSetLineSearch(tao, 0,
                          TaoSetOptions_ProjectedArmijo,
                          TaoApply_ProjectedArmijo,
                          TaoView_ProjectedArmijo,
                          TaoDestroy_ProjectedArmijo,
                          (void *) ls); CHKERRQ(info);

  TaoFunctionReturn(0);
}
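
/*
  Remark on the defaults above: with memorySize = 1 and the default
  alpha = 1.0, the history holds a single value, so the reference value is
  simply the previously accepted merit value and the search behaves as a
  classical (monotone) projected Armijo rule; increasing
  -tao_projected_armijo_memory_size is what makes the search nonmonotone.
*/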
/*@
   TaoCreateNDProjectedArmijoLineSearch - Create a non-monotone projected linesearch
   for a nondifferentiable function

   Input Parameters:
.  tao - TAO_SOLVER context

   Note:
   This algorithm is taken from the following references --

   Armijo, "Minimization of Functions Having Lipschitz Continuous
   First-Partial Derivatives," Pacific Journal of Mathematics, volume 16,
   pages 1-3, 1966.

   Ferris and Lucidi, "Nonmonotone Stabilization Methods for Nonlinear
   Equations," Journal of Optimization Theory and Applications, volume 81,
   pages 53-71, 1994.

   Grippo, Lampariello, and Lucidi, "A Nonmonotone Line Search Technique
   for Newton's Method," SIAM Journal on Numerical Analysis, volume 23,
   pages 707-716, 1986.

   Grippo, Lampariello, and Lucidi, "A Class of Nonmonotone Stabilization
   Methods in Unconstrained Optimization," Numerische Mathematik, volume 59,
   pages 779-805, 1991.

   Note:
   This line search enforces non-monotone Armijo descent conditions for
   bound-constrained optimization.  This routine is used within the
   following TAO solvers: feasible semismooth with linesearch (tao_ssfls).

   Level: developer

.keywords: TAO_SOLVER, linesearch
@*/
int TaoCreateNDProjectedArmijoLineSearch(TAO_SOLVER tao)
{
  TAO_PROJECTEDARMIJO *ls;
  int info;

  TaoFunctionBegin;

  info = TaoNew(TAO_PROJECTEDARMIJO, &ls); CHKERRQ(info);
  info = PetscLogObjectMemory(tao, sizeof(TAO_PROJECTEDARMIJO)); CHKERRQ(info);

  ls->work = TAO_NULL;
  ls->memory = TAO_NULL;
  ls->alpha = 1.0;
  ls->beta = 0.5;
  ls->sigma = 1e-4;
  ls->minimumStep = TAO_EPSILON;
  ls->memorySize = 1;
  ls->referencePolicy = REFERENCE_MAX;
  ls->replacementPolicy = REPLACE_MRU;

  info = TaoSetLineSearch(tao, 0,
                          TaoSetOptions_ProjectedArmijo,
                          TaoApply_NDProjectedArmijo,
                          TaoView_ProjectedArmijo,
                          TaoDestroy_ProjectedArmijo,
                          (void *) ls); CHKERRQ(info);

  TaoFunctionReturn(0);
}