Actual source code: asfls.c
1: #include "src/complementarity/impls/ssls/ssls.h"
2: // Context for ASXLS
3: // -- active-set - reduced matrices formed
4: // - inherit properties of original system
5: // -- semismooth (S) - function not differentiable
6: // - merit function continuously differentiable
7: // - Fischer-Burmeister reformulation of complementarity
8: // - Billups composition for two finite bounds
9: // -- infeasible (I) - iterates not guaranteed to remain within bounds
10: // -- feasible (F) - iterates guaranteed to remain within bounds
11: // -- linesearch (LS) - Armijo rule on direction
12: //
13: // Many other reformulations are possible and combinations of
14: // feasible/infeasible and linesearch/trust region are possible.
15: //
16: // Basic theory
17: // Fischer-Burmeister reformulation is semismooth with a continuously
18: // differentiable merit function and strongly semismooth if F has
19: // Lipschitz continuous derivatives.
20: //
21: // Every accumulation point generated by the algorithm is a stationary
22: // point for the merit function. Stationary points of the merit function
23: // are solutions of the complementarity problem if
24: // a. the stationary point has a BD-regular subdifferential, or
25: // b. the Schur complement F'/F'_ff is a P_0-matrix where ff is the
26: // index set corresponding to the free variables.
27: //
28: // If one of the accumulation points has a BD-regular subdifferential then
29: // a. the entire sequence converges to this accumulation point at
30: // a local q-superlinear rate
31: // b. if in addition the reformulation is strongly semismooth near
32: // this accumulation point, then the algorithm converges at a
33: // local q-quadratic rate.
34: //
35: // The theory for the feasible version follows from the feasible descent
36: // algorithm framework.
37: //
38: // References:
39: // Billups, "Algorithms for Complementarity Problems and Generalized
40: // Equations," Ph.D. thesis, University of Wisconsin - Madison, 1995.
41: // De Luca, Facchinei, Kanzow, "A Semismooth Equation Approach to the
42: // Solution of Nonlinear Complementarity Problems," Mathematical
43: // Programming, 75, pages 407-439, 1996.
44: // Ferris, Kanzow, Munson, "Feasible Descent Algorithms for Mixed
45: // Complementarity Problems," Mathematical Programming, 86,
46: // pages 475-497, 1999.
47: // Fischer, "A Special Newton-type Optimization Method," Optimization,
48: // 24, pages 269-284, 1992.
49: // Munson, Facchinei, Ferris, Fischer, Kanzow, "The Semismooth Algorithm
50: // for Large Scale Complementarity Problems," Technical Report 99-06,
51: // University of Wisconsin - Madison, 1999.
54: /*------------------------------------------------------------*/
57: static int TaoSolve_ASFLS(TAO_SOLVER tao, void *solver)
58: {
59: TAO_SSLS *asls = (TAO_SSLS *)solver;
60: TaoVec *x, *l, *u, *ff, *dpsi, *d, *w, *t1, *t2, *da, *db;
61: TaoMat *J,*Jsub;
62: TaoVec *dxfree,*r1, *r2, *r3;
63: // TaoLinearSolver *lsolver;
64: TaoIndexSet *FreeVariableSet,*FixedVariableSet;
65: double psi, psi_full, ndpsi, normd, innerd, t=0;
66: double delta, rho;
67: int iter=0, info;
68: TaoInt nn, nf;
69: TaoTerminateReason reason;
70: TaoTruth flag;
72: TaoFunctionBegin;
74: // Assume that Setup has been called!
75: // Set the structure for the Jacobian and create a linear solver.
77: delta = asls->delta;
78: rho = asls->rho;
80: info = TaoGetSolution(tao, &x); CHKERRQ(info);
81: info = TaoGetVariableBounds(tao, &l, &u); CHKERRQ(info);
82: info = TaoEvaluateVariableBounds(tao,l,u); CHKERRQ(info);
83: info = TaoGetJacobian(tao, &J); CHKERRQ(info);
85: info = x->GetDimension(&nn); CHKERRQ(info);
87: info = x->PointwiseMaximum(x, l); CHKERRQ(info);
88: info = x->PointwiseMinimum(x, u); CHKERRQ(info);
90: info = x->CreateIndexSet(&FreeVariableSet); CHKERRQ(info);
91: info = x->CreateIndexSet(&FixedVariableSet); CHKERRQ(info);
92: info = J->CreateReducedMatrix(FreeVariableSet,FreeVariableSet,&Jsub); CHKERRQ(info);
93: info = x->Clone(&dxfree); CHKERRQ(info);
94: info = x->Clone(&r1); CHKERRQ(info);
95: info = x->Clone(&r2); CHKERRQ(info);
96: info = x->Clone(&r3); CHKERRQ(info);
98: // f = asls->f;
99: ff = asls->ff;
100: dpsi = asls->dpsi;
101: d = asls->d;
102: w = asls->w;
103: t1 = asls->t1;
104: t2 = asls->t2;
105: da = asls->da;
106: db = asls->db;
108: info = TaoSetMeritFunction(tao, Tao_SSLS_Function, Tao_ASLS_FunctionGradient,
109: TAO_NULL, TAO_NULL, asls); CHKERRQ(info);
111: // Calculate the function value and fischer function value at the
112: // current iterate
114: info = TaoComputeMeritFunctionGradient(tao, x, &psi, dpsi); CHKERRQ(info);
115: info = dpsi->Norm2(&ndpsi); CHKERRQ(info);
117: while (1) {
119: // Check the termination criteria
121: info = TaoMonitor(tao, iter++, asls->merit, ndpsi, 0.0, t, &reason); CHKERRQ(info);
122: if (TAO_CONTINUE_ITERATING != reason) break;
124: // We are going to solve a linear system of equations. We need to
125: // set the tolerances for the solve so that we maintain an asymptotic
126: // rate of convergence that is superlinear.
127: // Note: these tolerances are for the reduced system. We really need
128: // to make sure that the full system satisfies the full-space conditions.
130: // This rule gives superlinear asymptotic convergence
131: // asls->atol = TaoMin(0.5, asls->merit*sqrt(asls->merit));
132: // asls->rtol = 0.0;
134: // This rule gives quadratic asymptotic convergence
135: // asls->atol = TaoMin(0.5, asls->merit*asls->merit);
136: // asls->rtol = 0.0;
138: // Calculate a free and fixed set of variables. The fixed set of
139: // variables are those for the d_b is approximately equal to zero.
140: // The definition of approximately changes as we approact the solution
141: // to the problem.
143: // No one rule is guaranteed to work in all cases. The following
144: // definition is based on the norm of the Jacobian matrix. If the
145: // norm is large, the tolerance becomes smaller.
147: info = J->Norm1(&(asls->identifier)); CHKERRQ(info);
148: // asls->identifier = asls->atol / (1 + asls->identifier);
149: asls->identifier = TaoMin(asls->merit, 1e-2) / (1 + asls->identifier);
150: // asls->identifier = 1e-4 / (1 + asls->identifier);
151: // asls->identifier = 1e-4;
152: // asls->identifier = 1e-8;
154: info = t1->SetToConstant(-asls->identifier); CHKERRQ(info);
155: info = t2->SetToConstant( asls->identifier); CHKERRQ(info);
157: info = FixedVariableSet->WhichBetweenOrEqual(t1, db, t2); CHKERRQ(info);
158: info = FreeVariableSet->ComplementOf(FixedVariableSet); CHKERRQ(info);
160: info = FixedVariableSet->GetSize(&nf);
161: PetscPrintf(((PetscObject)tao)->comm,"Fixed size: %d\n", nf);
163: // We now have our partition. Now calculate the direction in the
164: // fixed variable space. This direction is the gradient direction.
166: info = r1->SetReducedVec(ff, FixedVariableSet); CHKERRQ(info);
167: info = r2->SetReducedVec(da, FixedVariableSet); CHKERRQ(info);
168: info = r1->PointwiseDivide(r1, r2); CHKERRQ(info);
170: info = d->SetToZero(); CHKERRQ(info);
171: info = d->ReducedXPY(r1, FixedVariableSet);CHKERRQ(info);
173: // Our direction in the FixedVariableSet is fixed. Calculate the
174: // information needed for the step in the FreeVariableSet. To
175: // do this, we need to know the diagonal perturbation and the
176: // right hand side.
178: info = r1->SetReducedVec(da, FreeVariableSet); CHKERRQ(info);
179: info = r2->SetReducedVec(ff, FreeVariableSet); CHKERRQ(info);
180: info = r3->SetReducedVec(db, FreeVariableSet); CHKERRQ(info);
182: info = r1->PointwiseDivide(r1, r3); CHKERRQ(info);
183: info = r2->PointwiseDivide(r2, r3); CHKERRQ(info);
185: // r1 is the diagonal perturbation
186: // r2 is the right hand side
187: // r3 is no longer needed
189: // Now need to modify r2 for our direction choice in the fixed
190: // variable set: calculate t1 = J*d, take the reduced vector
191: // of t1 and modify r2.
193: info = J->Multiply(d, t1); CHKERRQ(info);
194: info = r3->SetReducedVec(t1, FreeVariableSet); CHKERRQ(info);
195: info = r2->Axpy(-1.0, r3);
197: // Calculate the reduced problem matrix and the direction
199: info = Jsub->SetReducedMatrix(J, FreeVariableSet, FreeVariableSet); CHKERRQ(info);
200: info = Jsub->AddDiagonal(r1); CHKERRQ(info);
201: info = dxfree->SetReducedVec(d, FreeVariableSet);CHKERRQ(info);
202: info = dxfree->SetToZero(); CHKERRQ(info);
204: // Set the convergence for the reduced problem solver.
205: // info = TaoGetLinearSolver(tao, &lsolver); CHKERRQ(info);
206: // info = lsolver->SetTolerances(0.1*asls->rtol, 0.1*asls->atol, 1e30, 10*nn); CHKERRQ(info);
208: // Calculate the reduced direction. (Really negative of Newton
209: // direction. Therefore, rest of the code uses -d.)
211: info = TaoPreLinearSolve(tao, Jsub); CHKERRQ(info);
212: info = TaoLinearSolve(tao, Jsub, r2, dxfree, &flag); CHKERRQ(info);
214: // Add the direction in the free variables back into the real direction.
216: info = d->ReducedXPY(dxfree, FreeVariableSet);CHKERRQ(info);
218: // Calculate the norm of the relative residual in the real space
220: // info = t1->PointwiseMultiply(da, d); CHKERRQ(info);
221: // info = J->Multiply(d, t2); CHKERRQ(info);
222: // info = t2->PointwiseMultiply(db, t2); CHKERRQ(info);
223: // info = t1->Axpy( 1.0, t2); CHKERRQ(info);
224: // info = t1->Axpy(-1.0, ff); CHKERRQ(info);
225: // info = t1->Norm2(&two_norm); CHKERRQ(info);
227: // if (two_norm >= asls->atol) {
228: // printf("Bad absolute residual: actual: %5.4e needed: %5.4e\n",
229: // two_norm, asls->atol);
230: // }
232: // Check the projected real direction for descent and if not, use the
233: // negative gradient direction.
235: info = w->CopyFrom(d); CHKERRQ(info);
236: info = w->Negate(); CHKERRQ(info);
237: info = w->BoundGradientProjection(w, l, x, u); CHKERRQ(info);
239: info = w->Norm2(&normd); CHKERRQ(info);
240: info = w->Dot(dpsi, &innerd); CHKERRQ(info);
242: if (innerd >= -delta*pow(normd, rho)) {
243: info = PetscInfo1(tao, "TaoSolve_SSLS: %d: newton direction not descent\n", iter); CHKERRQ(info);
244: info = d->CopyFrom(dpsi); CHKERRQ(info);
245: info = d->Dot(dpsi, &innerd); CHKERRQ(info);
246: }
247: info = d->Negate(); CHKERRQ(info);
248: innerd = -innerd;
250: // We now have a correct descent direction. Apply a linesearch to
251: // find the new iterate.
253: t = 1;
254: info = TaoLineSearchApply(tao, x, dpsi, d, w,
255: &psi, &psi_full, &t, &tao->lsflag); CHKERRQ(info);
256: info = dpsi->Norm2(&ndpsi); CHKERRQ(info);
257: }
259: info = TaoMatDestroy(Jsub);CHKERRQ(info);
260: info = TaoVecDestroy(dxfree);CHKERRQ(info);
261: info = TaoVecDestroy(r1);CHKERRQ(info);
262: info = TaoVecDestroy(r2);CHKERRQ(info);
263: info = TaoVecDestroy(r3);CHKERRQ(info);
264: info = TaoIndexSetDestroy(FreeVariableSet);CHKERRQ(info);
265: info = TaoIndexSetDestroy(FixedVariableSet);CHKERRQ(info);
267: TaoFunctionReturn(0);
268: }
270: /* ---------------------------------------------------------- */
274: int TaoCreate_ASFLS(TAO_SOLVER tao)
275: {
276: TAO_SSLS *asls;
277: int info;
279: TaoFunctionBegin;
281: info = TaoNew(TAO_SSLS, &asls); CHKERRQ(info);
282: info = PetscLogObjectMemory(tao, sizeof(TAO_SSLS)); CHKERRQ(info);
284: asls->delta = 1e-10;
285: asls->rho = 2.1;
287: asls->identifier = 1e-5;
289: info=TaoSetTaoSolveRoutine(tao,TaoSolve_ASFLS,(void*)asls); CHKERRQ(info);
290: info=TaoSetTaoSetUpDownRoutines(tao,TaoSetUp_SSLS,TaoSetDown_SSLS); CHKERRQ(info);
291: info=TaoSetTaoOptionsRoutine(tao,TaoSetOptions_SSLS); CHKERRQ(info);
292: info=TaoSetTaoViewRoutine(tao,TaoView_SSLS); CHKERRQ(info);
294: info = TaoCreateProjectedArmijoLineSearch(tao); CHKERRQ(info);
296: info = TaoSetMaximumIterates(tao,2000); CHKERRQ(info);
297: info = TaoSetMaximumFunctionEvaluations(tao,4000); CHKERRQ(info);
299: info = TaoSetTolerances(tao,0,0,0,0); CHKERRQ(info);
300: info = TaoSetGradientTolerances(tao,1.0e-16,0.0,0.0); CHKERRQ(info);
301: info = TaoSetFunctionLowerBound(tao,1.0e-8); CHKERRQ(info);
303: TaoFunctionReturn(0);
304: }