/* blmvm.c -- limited-memory variable-metric method (BLMVM) for
   bound-constrained minimization */
/*$Id$*/
3: #include "blmvm.h"
5: /*------------------------------------------------------------*/
/* TaoSolve_BLMVM - Run the bound-constrained limited-memory variable-metric
   (BLMVM) iteration to convergence.

   A projected quasi-Newton loop: the LMVM matrix M is updated from projected
   gradients, the quasi-Newton direction is projected onto the bounds, and a
   line search produces the next iterate.  Whenever the direction is not a
   descent direction or the line search fails, M is reset and a scaled
   (projected) gradient step is taken instead.

   Input:
     tao    - the TAO solver context
     solver - the TAO_BLMVM context installed at creation time
   Returns 0 on success; a nonzero error code via SETERRQ/CHKERRQ otherwise. */
static int TaoSolve_BLMVM(TAO_SOLVER tao, void *solver)
{
  TAO_BLMVM *blm = (TAO_BLMVM *)solver;
  TaoVec *X, *XL = blm->XL, *XU = blm->XU;        /* iterate; lower/upper bounds */
  TaoVec *G = blm->G, *GP = blm->GP, *D = blm->D; /* gradient, projected gradient, direction */
  TaoVec *Xold, *Gold;                            /* saved point for line-search recovery */
  TaoLMVMMat *M = blm->M;                         /* limited-memory variable-metric matrix */

  TaoTerminateReason reason;
  TaoTruth success;

  double f, f_full, fold, gdx, gnorm;
  double step = 1.0;

  int iter = 0, status = 0, info;

  TaoFunctionBegin;

  // Get vectors we will need
  info = TaoGetSolution(tao, &X); CHKERRQ(info);
  info = X->Clone(&Xold); CHKERRQ(info);
  info = X->Clone(&Gold); CHKERRQ(info);

  info = TaoEvaluateVariableBounds(tao, XL, XU); CHKERRQ(info);

  // Project initial point onto bounds
  info = X->Median(XL, X, XU); CHKERRQ(info);

  // Check convergence criteria: merit function value and norm of the
  // projected gradient at the (projected) starting point.
  info = TaoComputeMeritFunctionGradient(tao, X, &f, G); CHKERRQ(info);
  info = GP->BoundGradientProjection(G, XL, X, XU); CHKERRQ(info);
  info = GP->Norm2(&gnorm); CHKERRQ(info);
  // (x != x) is the portable NaN test.
  // NOTE(review): Xold and Gold leak on this error return (and on the early
  // TaoFunctionReturn below) -- they are only destroyed at the bottom of
  // this routine.  Confirm whether cleanup is expected on these paths.
  if ((f != f) || (gnorm != gnorm)) {
    SETERRQ(1, "User provided compute function generated Not-a-Number");
  }

  info = TaoMonitor(tao, iter, f, gnorm, 0.0, step, &reason); CHKERRQ(info);
  if (reason != TAO_CONTINUE_ITERATING) {
    TaoFunctionReturn(0);
  }

  // Set initial scaling for the LMVM matrix (Rayleigh-quotient-style
  // estimate based on f and ||g||^2; special-cased when f == 0).
  if (f != 0.0) {
    info = M->SetDelta(2.0 * TaoAbsDouble(f) / (gnorm*gnorm)); CHKERRQ(info);
  }
  else {
    info = M->SetDelta(2.0 / (gnorm*gnorm)); CHKERRQ(info);
  }

  // Set counters for gradient/reset fallback steps (reported by TaoView)
  blm->grad = 0;
  blm->reset = 0;

  // Have not converged; continue with the quasi-Newton iteration
  while (reason == TAO_CONTINUE_ITERATING) {

    // Compute direction: update M with the projected gradient, solve
    // M*D = G, then project the direction onto the bound constraints.
    info = M->Update(X, GP); CHKERRQ(info);
    info = M->Solve(G, D, &success); CHKERRQ(info);
    info = GP->BoundGradientProjection(D, XL, X, XU); CHKERRQ(info);

    // Check for success (descent direction): D is still un-negated here,
    // so gdx = <projected D, G> must be positive for descent.
    info = GP->Dot(G, &gdx); CHKERRQ(info);
    if (gdx <= 0) {
      // Step is not descent or solve was not successful.
      // Fall back to a steepest descent direction (scaled): reset M to a
      // freshly scaled diagonal and rebuild it from the current gradient.
      ++blm->grad;

      if (f != 0.0) {
        info = M->SetDelta(2.0 * TaoAbsDouble(f) / (gnorm*gnorm)); CHKERRQ(info);
      }
      else {
        info = M->SetDelta(2.0 / (gnorm*gnorm)); CHKERRQ(info);
      }
      info = M->Reset(); CHKERRQ(info);
      info = M->Update(X, G); CHKERRQ(info);
      info = M->Solve(G, D, &success); CHKERRQ(info);
    }
    info = D->Negate(); CHKERRQ(info);

    // Perform the linesearch; save the current point so it can be
    // restored if the search fails.
    fold = f;
    info = Xold->CopyFrom(X); CHKERRQ(info);
    info = Gold->CopyFrom(G); CHKERRQ(info);

    step = 1.0;
    info = TaoLineSearchApply(tao, X, G, D, GP, &f, &f_full, &step, &status); CHKERRQ(info);

    if (status) {
      // Linesearch failed.
      // Restore the pre-search point, reset factors, and retry with a
      // scaled (projected) gradient step.
      ++blm->reset;

      f = fold;
      info = X->CopyFrom(Xold); CHKERRQ(info);
      info = G->CopyFrom(Gold); CHKERRQ(info);

      if (f != 0.0) {
        info = M->SetDelta(2.0 * TaoAbsDouble(f) / (gnorm*gnorm)); CHKERRQ(info);
      }
      else {
        info = M->SetDelta(2.0 / (gnorm*gnorm)); CHKERRQ(info);
      }
      info = M->Reset(); CHKERRQ(info);
      info = M->Update(X, G); CHKERRQ(info);
      info = M->Solve(G, D, &success); CHKERRQ(info);
      info = D->Negate(); CHKERRQ(info);

      // This may be incorrect; linesearch has values for stepmax and stepmin
      // that should be reset.
      step = 1.0;
      info = TaoLineSearchApply(tao, X, G, D, GP, &f, &f_full, &step, &status); CHKERRQ(info);

      if (status) {
        // Linesearch failed again.
        // NOTE(review): failure of the fallback gradient step is silently
        // ignored; iteration continues and termination is left to
        // TaoMonitor.  Probably the iteration should stop here.
      }
    }

    // Check for termination on the new iterate
    info = GP->BoundGradientProjection(G, XL, X, XU); CHKERRQ(info);
    info = GP->Norm2(&gnorm); CHKERRQ(info);
    if ((f != f) || (gnorm != gnorm)) {
      SETERRQ(1, "User provided compute function generated Not-a-Number");
    }
    info = TaoMonitor(tao, ++iter, f, gnorm, 0.0, step, &reason); CHKERRQ(info);
  }

  info = TaoVecDestroy(Xold); CHKERRQ(info);
  info = TaoVecDestroy(Gold); CHKERRQ(info);
  TaoFunctionReturn(0);
}
143: static int TaoSetUp_BLMVM(TAO_SOLVER tao, void *solver)
144: {
145: TAO_BLMVM *blm = (TAO_BLMVM *)solver;
146: TaoVec *X;
147: int info;
149: TaoFunctionBegin;
151: info = TaoGetSolution(tao, &X); CHKERRQ(info);
152: info = X->Clone(&blm->XL); CHKERRQ(info);
153: info = X->Clone(&blm->XU); CHKERRQ(info);
154: info = X->Clone(&blm->D); CHKERRQ(info);
155: info = X->Clone(&blm->G); CHKERRQ(info);
156: info = X->Clone(&blm->GP); CHKERRQ(info);
158: info = TaoSetLagrangianGradientVector(tao, blm->GP); CHKERRQ(info);
159: info = TaoSetStepDirectionVector(tao, blm->D); CHKERRQ(info);
160: info = TaoSetVariableBounds(tao, blm->XL, blm->XU); CHKERRQ(info);
161:
162: // Create matrix for the limited memory approximation
163: blm->M = new TaoLMVMMat(X);
165: info = TaoCheckFG(tao); CHKERRQ(info);
166: TaoFunctionReturn(0);
167: }
169: /* ---------------------------------------------------------- */
172: static int TaoSetDown_BLMVM(TAO_SOLVER tao, void *solver)
173: {
174: TAO_BLMVM *blm = (TAO_BLMVM *)solver;
175: int info;
177: TaoFunctionBegin;
179: info=TaoVecDestroy(blm->XL); CHKERRQ(info);
180: info=TaoVecDestroy(blm->XU); CHKERRQ(info);
181: info=TaoVecDestroy(blm->G); CHKERRQ(info);
182: info=TaoVecDestroy(blm->GP); CHKERRQ(info);
183: info=TaoVecDestroy(blm->D); CHKERRQ(info);
184: info=TaoMatDestroy(blm->M); CHKERRQ(info);
185:
186: info = TaoSetLagrangianGradientVector(tao, 0); CHKERRQ(info);
187: info = TaoSetStepDirectionVector(tao, 0); CHKERRQ(info);
188: info = TaoSetVariableBounds(tao, 0, 0); CHKERRQ(info);
190: TaoFunctionReturn(0);
191: }
193: /*------------------------------------------------------------*/
196: static int TaoSetOptions_BLMVM(TAO_SOLVER tao, void *solver)
197: {
198: int info;
200: TaoFunctionBegin;
201: info = TaoOptionsHead("Limited-memory variable-metric method for bound constrained optimization"); CHKERRQ(info);
202: info = TaoLineSearchSetFromOptions(tao);CHKERRQ(info);
203: info = TaoOptionsTail();CHKERRQ(info);
204: TaoFunctionReturn(0);
205: }
208: /*------------------------------------------------------------*/
211: static int TaoView_BLMVM(TAO_SOLVER tao, void *solver)
212: {
213: TAO_BLMVM *blm = (TAO_BLMVM *) solver;
214: int info;
216: TaoFunctionBegin;
217: info = TaoPrintInt(tao, " Rejected matrix updates: %d\n", blm->M->GetRejects()); CHKERRQ(info);
218: info = TaoPrintInt(tao, " Gradient steps: %d\n", blm->grad); CHKERRQ(info);
219: info = TaoPrintInt(tao, " Reset steps: %d\n", blm->reset); CHKERRQ(info);
220: info = TaoLineSearchView(tao); CHKERRQ(info);
221: TaoFunctionReturn(0);
222: }
/* TaoGetDualVariables_BLMVM - Recover estimates of the dual variables for
   the lower (DXL) and upper (DXU) bound constraints from the gradient G
   and the projected gradient GP.

   The sequence of vector operations below computes, componentwise,
       DXL = max(GP - G, 0)
       DXU = (G - GP) + DXL  =  max(G - GP, 0)
   i.e. DXL and DXU are the positive and negative parts of GP - G, so that
   DXL - DXU = GP - G.
   NOTE(review): the multiplier interpretation/sign convention is inferred
   from this algebra -- confirm against the TAO dual-variable convention. */
static int TaoGetDualVariables_BLMVM(TAO_SOLVER tao, TaoVec *DXL, TaoVec* DXU, void *solver)
{
  TAO_BLMVM *blm = (TAO_BLMVM *) solver;
  TaoVec *G = blm->G, *GP = blm->GP;
  int info;

  TaoFunctionBegin;
  // DXL = -1*G + 1*GP = GP - G
  info = DXL->Waxpby(-1.0, G, 1.0, GP); CHKERRQ(info);
  // DXU temporarily holds the zero vector used for the clipping below
  info = DXU->SetToZero(); CHKERRQ(info);
  // DXL = max(DXL, 0)  (clip to the nonnegative part)
  info = DXL->PointwiseMaximum(DXL, DXU); CHKERRQ(info);

  // DXU = -1*GP + 1*G = G - GP, then DXU += DXL
  info = DXU->Waxpby(-1.0, GP, 1.0, G); CHKERRQ(info);
  info = DXU->Axpy(1.0, DXL); CHKERRQ(info);
  TaoFunctionReturn(0);
}
242: /* ---------------------------------------------------------- */
246: int TaoCreate_BLMVM(TAO_SOLVER tao)
247: {
248: TAO_BLMVM *blm;
249: int info;
251: TaoFunctionBegin;
253: info = TaoNew(TAO_BLMVM, &blm); CHKERRQ(info);
254: info = PetscLogObjectMemory(tao, sizeof(TAO_BLMVM)); CHKERRQ(info);
256: info = TaoSetTaoSolveRoutine(tao, TaoSolve_BLMVM, (void*)blm); CHKERRQ(info);
257: info = TaoSetTaoSetUpDownRoutines(tao, TaoSetUp_BLMVM, TaoSetDown_BLMVM); CHKERRQ(info);
258: info = TaoSetTaoOptionsRoutine(tao, TaoSetOptions_BLMVM); CHKERRQ(info);
259: info = TaoSetTaoViewRoutine(tao, TaoView_BLMVM); CHKERRQ(info);
260: info = TaoSetTaoDualVariablesRoutine(tao, TaoGetDualVariables_BLMVM); CHKERRQ(info);
262: info = TaoSetMaximumIterates(tao, 2000); CHKERRQ(info);
263: info = TaoSetMaximumFunctionEvaluations(tao, 4000); CHKERRQ(info);
264: info = TaoSetTolerances(tao, 1e-4, 1e-4, 0, 0); CHKERRQ(info);
266: info = TaoCreateMoreThuenteBoundLineSearch(tao, 0, 0); CHKERRQ(info);
267: TaoFunctionReturn(0);
268: }