Actual source code: armijo.c

#include "armijo.h"

#define REPLACE_FIFO 1
#define REPLACE_MRU  2

#define REFERENCE_MAX  1
#define REFERENCE_AVE  2
#define REFERENCE_MEAN 3
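/* Constants selecting how the history of function values is updated
   (replacement policy) and how the nonmonotone reference value is formed
   from that history (reference policy). */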

static int TaoDestroy_Armijo(TAO_SOLVER tao, void *ctx)
{
  TAO_ARMIJO *ls = (TAO_ARMIJO *)ctx;
  int info;

  TaoFunctionBegin;

  if (ls->memory != TAO_NULL) {
    info = TaoFree(ls->memory); CHKERRQ(info);
    ls->memory = TAO_NULL;
  }
  info = TaoFree(ls); CHKERRQ(info);
  TaoFunctionReturn(0);
}

static int TaoSetOptions_Armijo(TAO_SOLVER tao, void *ctx)
{
  TAO_ARMIJO *ls = (TAO_ARMIJO *)tao->linectx;
  int info;

  TaoFunctionBegin;
  info = TaoOptionsHead("Armijo linesearch options"); CHKERRQ(info);
  info = TaoOptionDouble("-tao_armijo_alpha", "initial reference constant", "", ls->alpha, &ls->alpha, 0); CHKERRQ(info);
  info = TaoOptionDouble("-tao_armijo_beta_inf", "decrease constant one", "", ls->beta_inf, &ls->beta_inf, 0); CHKERRQ(info);
  info = TaoOptionDouble("-tao_armijo_beta", "decrease constant", "", ls->beta, &ls->beta, 0); CHKERRQ(info);
  info = TaoOptionDouble("-tao_armijo_sigma", "acceptance constant", "", ls->sigma, &ls->sigma, 0); CHKERRQ(info);
  info = TaoOptionInt("-tao_armijo_memory_size", "number of historical elements", "", ls->memorySize, &ls->memorySize, 0); CHKERRQ(info);
  info = TaoOptionDouble("-tao_armijo_minimum_step", "minimum acceptable step", "", ls->minimumStep, &ls->minimumStep, 0); CHKERRQ(info);
  info = TaoOptionInt("-tao_projected_armijo_reference_policy", "policy for updating reference value", "", ls->referencePolicy, &ls->referencePolicy, 0); CHKERRQ(info);
  info = TaoOptionInt("-tao_projected_armijo_replacement_policy", "policy for updating memory", "", ls->replacementPolicy, &ls->replacementPolicy, 0); CHKERRQ(info);
  info = TaoOptionsTail(); CHKERRQ(info);
  TaoFunctionReturn(0);
}
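/* Example: the options above are registered in the options database, so they
   can be adjusted on the driver program's command line, e.g. (illustrative
   values):
     -tao_armijo_beta 0.25 -tao_armijo_sigma 1e-4 -tao_armijo_memory_size 10 */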

static int TaoView_Armijo(TAO_SOLVER tao, void *ctx)
{
  TAO_ARMIJO *ls = (TAO_ARMIJO *)ctx;
  int info;

  TaoFunctionBegin;

  info = TaoPrintDouble(tao, "  Armijo linesearch: alpha=%g", ls->alpha); CHKERRQ(info);
  info = TaoPrintDouble(tao, " beta=%g ", ls->beta); CHKERRQ(info);
  info = TaoPrintDouble(tao, "sigma=%g ", ls->sigma); CHKERRQ(info);
  info = TaoPrintDouble(tao, "minstep=%g,", ls->minimumStep); CHKERRQ(info);
  info = TaoPrintInt(tao, "memsize=%d\n", ls->memorySize); CHKERRQ(info);

  TaoFunctionReturn(0);
}

static int TaoApply_PreArmijo(TAO_SOLVER tao, TAO_ARMIJO *ls,
                              double f, double step,
                              double *ref, int *idx, int *info2)
{
  int i, info;

  TaoFunctionBegin;

  *info2 = 0;

  // Check linesearch parameters
  if (step < 0) {
    info = PetscInfo1(tao, "TaoApply_Armijo:Line search error: step (%g) < 0\n", step); CHKERRQ(info);
    *info2 = -1;
  }

  if (ls->alpha < 1) {
    info = PetscInfo1(tao, "TaoApply_Armijo:Line search error: alpha (%g) < 1\n", ls->alpha); CHKERRQ(info);
    *info2 = -2;
  }

  if ((ls->beta <= 0) || (ls->beta >= 1)) {
    info = PetscInfo1(tao, "TaoApply_Armijo:Line search error: beta (%g) invalid\n", ls->beta); CHKERRQ(info);
    *info2 = -3;
  }

  if ((ls->beta_inf <= 0) || (ls->beta_inf >= 1)) {
    info = PetscInfo1(tao, "TaoApply_Armijo:Line search error: beta_inf (%g) invalid\n", ls->beta_inf); CHKERRQ(info);
    *info2 = -4;
  }

  if ((ls->sigma <= 0) || (ls->sigma >= 0.5)) {
    info = PetscInfo1(tao, "TaoApply_Armijo:Line search error: sigma (%g) invalid\n", ls->sigma); CHKERRQ(info);
    *info2 = -5;
  }

  if (ls->minimumStep <= 0) {
    info = PetscInfo1(tao, "TaoApply_Armijo:Line search error: minimum_step (%g) <= 0\n", ls->minimumStep); CHKERRQ(info);
    *info2 = -6;
  }

  if (ls->memorySize < 1) {
    info = PetscInfo1(tao, "TaoApply_Armijo:Line search error: memory_size (%d) < 1\n", ls->memorySize); CHKERRQ(info);
    *info2 = -7;
  }

  if ((ls->referencePolicy != REFERENCE_MAX) &&
      (ls->referencePolicy != REFERENCE_AVE) &&
      (ls->referencePolicy != REFERENCE_MEAN)) {
    info = PetscInfo(tao, "TaoApply_Armijo:Line search error: reference_policy invalid\n"); CHKERRQ(info);
    *info2 = -8;
  }

  if ((ls->replacementPolicy != REPLACE_FIFO) &&
      (ls->replacementPolicy != REPLACE_MRU)) {
    info = PetscInfo(tao, "TaoApply_Armijo:Line search error: replacement_policy invalid\n"); CHKERRQ(info);
    *info2 = -9;
  }

  if (TaoInfOrNaN(f)) {
    info = PetscInfo(tao, "TaoApply_Armijo:Line search error: initial function inf or nan\n"); CHKERRQ(info);
    *info2 = -10;
  }

  if (*info2) {
    TaoFunctionReturn(0);
  }

  // Check to see if the memory has been allocated.  If not, allocate
  // the historical array and populate it with the initial function
  // values.
  if (ls->memory == TAO_NULL) {
    info = TaoMalloc(sizeof(double)*ls->memorySize, &ls->memory); CHKERRQ(info);

    info = PetscLogObjectMemory(tao, sizeof(double)*ls->memorySize); CHKERRQ(info);
  }

  if (tao->iter == 0) {
    for (i = 0; i < ls->memorySize; i++) {
      ls->memory[i] = ls->alpha*f;
    }

    ls->current = 0;
    ls->lastReference = ls->memory[0];
  }

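  // The reference value is taken from the stored history: REFERENCE_MAX (the
  // default computed below) uses the largest entry, REFERENCE_AVE uses the
  // average bounded below by the most recent entry, and REFERENCE_MEAN uses
  // the smaller of the maximum and 0.5*(lastReference + most recent entry).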
  // Calculate reference value (MAX)
  *ref = ls->memory[0];
  *idx = 0;

  for (i = 1; i < ls->memorySize; i++) {
    if (ls->memory[i] > *ref) {
      *ref = ls->memory[i];
      *idx = i;
    }
  }

  if (ls->referencePolicy == REFERENCE_AVE) {
    *ref = 0;
    for (i = 0; i < ls->memorySize; i++) {
      *ref += ls->memory[i];
    }
    *ref = *ref / ls->memorySize;
    *ref = TaoMax(*ref, ls->memory[ls->current]);
  }
  else if (ls->referencePolicy == REFERENCE_MEAN) {
    *ref = TaoMin(*ref, 0.5*(ls->lastReference + ls->memory[ls->current]));
  }
  TaoFunctionReturn(0);
}

static int TaoApply_PostArmijo(TAO_SOLVER tao, TAO_ARMIJO *ls,
                               double f, double step,
                               double ref, int idx, int *info2)
{
  int info;
  TaoFunctionBegin;

  *info2 = 0;

  // Check termination
  if (step < ls->minimumStep) {
    info = PetscInfo(tao, "TaoApply_Armijo:Step is at lower bound.\n"); CHKERRQ(info);
    *info2 = 1;
  }

  if (TaoInfOrNaN(f)) {
    info = PetscInfo(tao, "TaoApply_Armijo:Function is inf or nan.\n"); CHKERRQ(info);
    *info2 = 2;
  }

  if (*info2) {
    TaoFunctionReturn(0);
  }

  // Successful termination, update memory
  ls->lastReference = ref;
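  // REPLACE_FIFO overwrites the history entries cyclically (oldest first);
  // REPLACE_MRU overwrites the entry that supplied the reference value (idx).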
  if (ls->replacementPolicy == REPLACE_FIFO) {
    ls->memory[ls->current++] = f;
    if (ls->current >= ls->memorySize) {
      ls->current = 0;
    }
  }
  else {
    ls->current = idx;
    ls->memory[idx] = f;
  }
  TaoFunctionReturn(0);
}

/* @ TaoApply_Armijo - This routine performs a linesearch. It
   backtracks until the (nonmonotone) Armijo conditions are satisfied.

   Input Parameters:
+  tao - TAO_SOLVER context
.  X - current iterate (on output X contains new iterate, X + step*S)
.  S - search direction
.  f - merit function evaluated at X
.  G - gradient of merit function evaluated at X
.  W - work vector
-  step - initial estimate of step length

   Output parameters:
+  f - merit function evaluated at new iterate, X + step*S
.  G - gradient of merit function evaluated at new iterate, X + step*S
.  X - new iterate
-  step - final step length

   Info is set to one of:
.   0 - the line search succeeds; the sufficient decrease
   condition and the directional derivative condition hold

   negative number if an input parameter is invalid
-   -1 -  step < 0

   positive number > 1 if the line search otherwise terminates
+    1 -  Step is at the lower bound, stepmin.
@ */

static int TaoApply_Armijo(TAO_SOLVER tao, TaoVec *X, TaoVec *G, TaoVec *S,
                           TaoVec *W, double *f, double *f_full, double *step,
                           int *info2, void *ctx)
{
  TAO_ARMIJO *ls = (TAO_ARMIJO *)ctx;

  const double beta = ls->beta;
  const double beta_inf = ls->beta_inf;

  double fact, ref, t, gdx;
  int idx, info;

  TaoFunctionBegin;
  info = TaoApply_PreArmijo(tao, ls, *f, *step, &ref, &idx, info2); CHKERRQ(info);

#if defined(PETSC_USE_COMPLEX)
  info = G->Dot(S, &cgdx); CHKERRQ(info); gdx = TaoReal(cgdx);
#else
  info = G->Dot(S, &gdx); CHKERRQ(info);
#endif

  if (TaoInfOrNaN(gdx)) {
    info = PetscInfo(tao, "TaoApply_Armijo:Line search error: gdx is inf or nan\n"); CHKERRQ(info);
    *info2 = -11;
  }

  if (gdx >= 0.0) {
    info = PetscInfo(tao, "TaoApply_Armijo:Search direction not a descent direction\n"); CHKERRQ(info);
    *info2 = 12;
  }

  if (*info2) {
    TaoFunctionReturn(0);
  }

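  // Backtracking loop: accept the first step length t satisfying the
  // (nonmonotone) Armijo condition
  //   f(X + t*S) <= ref + sigma * t * (G'S),
  // where ref is the reference value computed in TaoApply_PreArmijo.  A trial
  // step producing inf/nan is cut by beta_inf; otherwise the step is cut by beta.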
  fact = ls->sigma * gdx;
  t = *step;
  while (t >= ls->minimumStep) {
    // Calculate iterate
    info = W->Waxpby(1.0, X, t, S); CHKERRQ(info);

    // Calculate function at new iterate
    info = TaoComputeMeritFunction(tao, W, f); CHKERRQ(info);
    if (*step == t) {
      *f_full = *f;
    }

    if (TaoInfOrNaN(*f)) {
      t *= beta_inf;
    }
    else {
      // Check descent condition
      if (*f <= ref + t*fact) {
        break;
      }

      t *= beta;
    }
  }

  info = TaoApply_PostArmijo(tao, ls, *f, t, ref, idx, info2); CHKERRQ(info);

  // Update iterate and compute gradient
  *step = t;
  info = X->CopyFrom(W); CHKERRQ(info);
  info = TaoComputeMeritGradient(tao, X, G); CHKERRQ(info);

  // Finish computations
  info = PetscInfo1(tao, "TaoApply_Armijo:step = %10.4f\n", *step); CHKERRQ(info);
  TaoFunctionReturn(0);
}

/* @ TaoApply_NDArmijo - This routine performs a linesearch. It
   backtracks until the (nonmonotone) Armijo conditions are satisfied.
   This is modified for a nondifferentiable function.

   Input Parameters:
+  tao - TAO_SOLVER context
.  X - current iterate (on output X contains new iterate, X + step*S)
.  S - search direction
.  f - merit function evaluated at X
-  step - initial estimate of step length

   Output parameters:
+  f - merit function evaluated at new iterate, X + step*S
.  X - new iterate
-  step - final step length

   Info is set to one of:
.   0 - the line search succeeds; the sufficient decrease
   condition and the directional derivative condition hold

   negative number if an input parameter is invalid
-   -1 -  step < 0

   positive number > 1 if the line search otherwise terminates
+    1 -  Step is at the lower bound, stepmin.
@ */

static int TaoApply_NDArmijo(TAO_SOLVER tao, TaoVec *X, TaoVec *G, TaoVec *S,
                             TaoVec *W, double *f, double *f_full, double *step,
                             int *info2, void *ctx)
{
  TAO_ARMIJO *ls = (TAO_ARMIJO *)ctx;

  const double fact = ls->sigma;
  const double beta = ls->beta;
  const double beta_inf = ls->beta_inf;

  double ref, t;
  int idx, info;

  TaoFunctionBegin;

  info = TaoApply_PreArmijo(tao, ls, *f, *step, &ref, &idx, info2); CHKERRQ(info);
  if (*info2) {
    TaoFunctionReturn(0);
  }

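  // Backtracking loop for the nondifferentiable case: accept the first step
  // length t satisfying
  //   f(X + t*S) <= (1 - sigma*t) * ref,
  // cutting the step by beta_inf on inf/nan trial values and by beta otherwise.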
  t = *step;
  while (t >= ls->minimumStep) {
    // Calculate iterate
    info = W->Waxpby(1.0, X, t, S); CHKERRQ(info);

    // Calculate function at new iterate
    info = TaoComputeMeritFunction(tao, W, f); CHKERRQ(info);
    if (*step == t) {
      *f_full = *f;
    }

    if (TaoInfOrNaN(*f)) {
      t *= beta_inf;
    }
    else {
      // Check descent condition
      if (*f <= (1 - fact*t)*ref) {
        break;
      }

      t *= beta;
    }
  }

  info = TaoApply_PostArmijo(tao, ls, *f, t, ref, idx, info2); CHKERRQ(info);

  // Update iterate and compute gradient
  *step = t;
  info = X->CopyFrom(W); CHKERRQ(info);
  info = TaoComputeMeritGradient(tao, X, G); CHKERRQ(info);

  // Finish computations
  info = PetscInfo1(tao, "TaoApply_NDArmijo:step = %10.4f\n", *step); CHKERRQ(info);
  TaoFunctionReturn(0);
}

/*@C
   TaoCreateArmijoLineSearch - Create a non-monotone linesearch

   Input Parameters:
.  tao - TAO_SOLVER context

   Note:
   This algorithm is taken from the following references --

   Armijo, "Minimization of Functions Having Lipschitz Continuous
     First-Partial Derivatives," Pacific Journal of Mathematics, volume 16,
     pages 1-3, 1966.
   Ferris and Lucidi, "Nonmonotone Stabilization Methods for Nonlinear
     Equations," Journal of Optimization Theory and Applications, volume 81,
     pages 53-71, 1994.
   Grippo, Lampariello, and Lucidi, "A Nonmonotone Line Search Technique
     for Newton's Method," SIAM Journal on Numerical Analysis, volume 23,
     pages 707-716, 1986.
   Grippo, Lampariello, and Lucidi, "A Class of Nonmonotone Stabilization
     Methods in Unconstrained Optimization," Numerische Mathematik, volume 59,
     pages 779-805, 1991.

   Note:
   This line search enforces non-monotone Armijo descent conditions for
   unconstrained optimization.  This routine is used within the following
   TAO solvers: infeasible semismooth with linesearch (tao_ssils).

   Level: developer

.keywords: TAO_SOLVER, linesearch
@*/
int TaoCreateArmijoLineSearch(TAO_SOLVER tao)
{
  TAO_ARMIJO *ls;
  int info;

  TaoFunctionBegin;

  info = TaoNew(TAO_ARMIJO, &ls); CHKERRQ(info);
  info = PetscLogObjectMemory(tao, sizeof(TAO_ARMIJO)); CHKERRQ(info);

  ls->memory = TAO_NULL;
  ls->alpha = 1.0;
  ls->beta = 0.5;
  ls->beta_inf = 0.5;
  ls->sigma = 1e-4;
  ls->minimumStep = TAO_EPSILON;
  ls->memorySize = 1;
  ls->referencePolicy = REFERENCE_MAX;
  ls->replacementPolicy = REPLACE_MRU;

  info = TaoSetLineSearch(tao, 0,
                          TaoSetOptions_Armijo,
                          TaoApply_Armijo,
                          TaoView_Armijo,
                          TaoDestroy_Armijo,
                          (void *) ls); CHKERRQ(info);

  TaoFunctionReturn(0);
}
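/* Usage sketch: a TAO solver implementation would typically attach this
   linesearch during its own setup routine; the solver name below is
   hypothetical.

     int TaoCreate_MySolver(TAO_SOLVER tao)
     {
       int info;
       TaoFunctionBegin;
       info = TaoCreateArmijoLineSearch(tao); CHKERRQ(info);
       TaoFunctionReturn(0);
     }
*/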

/*@C
   TaoCreateNDArmijoLineSearch - Create a non-monotone linesearch for a
     nondifferentiable function

   Input Parameters:
.  tao - TAO_SOLVER context

   Note:
   This algorithm is taken from the following references --

   Armijo, "Minimization of Functions Having Lipschitz Continuous
     First-Partial Derivatives," Pacific Journal of Mathematics, volume 16,
     pages 1-3, 1966.
   Ferris and Lucidi, "Nonmonotone Stabilization Methods for Nonlinear
     Equations," Journal of Optimization Theory and Applications, volume 81,
     pages 53-71, 1994.
   Grippo, Lampariello, and Lucidi, "A Nonmonotone Line Search Technique
     for Newton's Method," SIAM Journal on Numerical Analysis, volume 23,
     pages 707-716, 1986.
   Grippo, Lampariello, and Lucidi, "A Class of Nonmonotone Stabilization
     Methods in Unconstrained Optimization," Numerische Mathematik, volume 59,
     pages 779-805, 1991.

   Note:
   This line search enforces non-monotone Armijo descent conditions for
   unconstrained optimization.  This routine is used within the following
   TAO solvers: infeasible semismooth with linesearch (tao_ssils).

   Level: developer

.keywords: TAO_SOLVER, linesearch
@*/
int TaoCreateNDArmijoLineSearch(TAO_SOLVER tao)
{
  TAO_ARMIJO *ls;
  int info;

  TaoFunctionBegin;

  info = TaoNew(TAO_ARMIJO, &ls); CHKERRQ(info);
  info = PetscLogObjectMemory(tao, sizeof(TAO_ARMIJO)); CHKERRQ(info);

  ls->memory = TAO_NULL;
  ls->alpha = 1.0;
  ls->beta = 0.5;
  ls->beta_inf = 0.5;
  ls->sigma = 1e-4;
  ls->minimumStep = TAO_EPSILON;
  ls->memorySize = 1;
  ls->referencePolicy = REFERENCE_MAX;
  ls->replacementPolicy = REPLACE_MRU;

  info = TaoSetLineSearch(tao, 0,
                          TaoSetOptions_Armijo,
                          TaoApply_NDArmijo,
                          TaoView_Armijo,
                          TaoDestroy_Armijo,
                          (void *) ls); CHKERRQ(info);

  TaoFunctionReturn(0);
}