SHOGUN
v2.0.0
|
00001 /* 00002 * This program is free software; you can redistribute it and/or modify 00003 * it under the terms of the GNU General Public License as published by 00004 * the Free Software Foundation; either version 3 of the License, or 00005 * (at your option) any later version. 00006 * 00007 * Purpose: solves quadratic programming problem for pattern recognition 00008 * for support vectors 00009 * 00010 * Written (W) 1997-1998 Alex J. Smola 00011 * Written (W) 1999-2009 Soeren Sonnenburg 00012 * Written (W) 1999-2008 Gunnar Raetsch 00013 * Copyright (C) 1997-2009 Fraunhofer Institute FIRST and Max-Planck-Society 00014 */ 00015 00016 #include <shogun/lib/common.h> 00017 #include <shogun/io/SGIO.h> 00018 #include <shogun/mathematics/lapack.h> 00019 #include <shogun/mathematics/Math.h> 00020 #include <shogun/lib/external/pr_loqo.h> 00021 00022 #include <math.h> 00023 #include <time.h> 00024 #include <stdlib.h> 00025 #include <stdio.h> 00026 00027 namespace shogun 00028 { 00029 00030 #define PREDICTOR 1 00031 #define CORRECTOR 2 00032 00033 /***************************************************************** 00034 replace this by any other function that will exit gracefully 00035 in a larger system 00036 ***************************************************************/ 00037 00038 void nrerror(char error_text[]) 00039 { 00040 SG_SDEBUG("terminating optimizer - %s\n", error_text); 00041 // exit(1); 00042 } 00043 00044 /***************************************************************** 00045 taken from numerical recipes and modified to accept pointers 00046 moreover numerical recipes code seems to be buggy (at least the 00047 ones on the web) 00048 00049 cholesky solver and backsubstitution 00050 leaves upper right triangle intact (rows first order) 00051 ***************************************************************/ 00052 00053 #ifdef HAVE_LAPACK 00054 bool choldc(float64_t* a, int32_t n, float64_t* p) 00055 { 00056 if (n<=0) 00057 return false; 00058 00059 float64_t* a2=SG_MALLOC(float64_t, n*n); 00060 00061 for (int32_t i=0; i<n; i++) 00062 { 00063 for (int32_t j=0; j<n; j++) 00064 a2[n*i+j]=a[n*i+j]; 00065 } 00066 00067 /* int for calling external lib */ 00068 int result=clapack_dpotrf(CblasRowMajor, CblasUpper, (int) n, a2, (int) n); 00069 00070 for (int32_t i=0; i<n; i++) 00071 p[i]=a2[(n+1)*i]; 00072 00073 for (int32_t i=0; i<n; i++) 00074 { 00075 for (int32_t j=i+1; j<n; j++) 00076 { 00077 a[n*j+i]=a2[n*i+j]; 00078 } 00079 } 00080 00081 if (result>0) 00082 SG_SDEBUG("Choldc failed, matrix not positive definite\n"); 00083 00084 SG_FREE(a2); 00085 00086 return result==0; 00087 } 00088 #else 00089 bool choldc(float64_t a[], int32_t n, float64_t p[]) 00090 { 00091 void nrerror(char error_text[]); 00092 int32_t i, j, k; 00093 float64_t sum; 00094 00095 for (i = 0; i < n; i++) 00096 { 00097 for (j = i; j < n; j++) 00098 { 00099 sum=a[n*i + j]; 00100 00101 for (k=i-1; k>=0; k--) 00102 sum -= a[n*i + k]*a[n*j + k]; 00103 00104 if (i == j) 00105 { 00106 if (sum <= 0.0) 00107 { 00108 SG_SDEBUG("Choldc failed, matrix not positive definite"); 00109 sum = 0.0; 00110 return false; 00111 } 00112 00113 p[i]=sqrt(sum); 00114 00115 } 00116 else 00117 a[n*j + i] = sum/p[i]; 00118 } 00119 } 00120 00121 return true; 00122 } 00123 #endif 00124 00125 void cholsb( 00126 float64_t a[], int32_t n, float64_t p[], float64_t b[], float64_t x[]) 00127 { 00128 int32_t i, k; 00129 float64_t sum; 00130 00131 for (i=0; i<n; i++) { 00132 sum=b[i]; 00133 for (k=i-1; k>=0; k--) sum -= a[n*i + k]*x[k]; 00134 x[i]=sum/p[i]; 00135 } 00136 00137 for (i=n-1; i>=0; i--) { 00138 sum=x[i]; 00139 for (k=i+1; k<n; k++) sum -= a[n*k + i]*x[k]; 00140 x[i]=sum/p[i]; 00141 } 00142 } 00143 00144 /***************************************************************** 00145 sometimes we only need the forward or backward pass of the 00146 backsubstitution, hence we provide these two routines separately 00147 ***************************************************************/ 00148 00149 void chol_forward( 00150 float64_t a[], int32_t n, float64_t p[], float64_t b[], float64_t x[]) 00151 { 00152 int32_t i, k; 00153 float64_t sum; 00154 00155 for (i=0; i<n; i++) { 00156 sum=b[i]; 00157 for (k=i-1; k>=0; k--) sum -= a[n*i + k]*x[k]; 00158 x[i]=sum/p[i]; 00159 } 00160 } 00161 00162 void chol_backward( 00163 float64_t a[], int32_t n, float64_t p[], float64_t b[], float64_t x[]) 00164 { 00165 int32_t i, k; 00166 float64_t sum; 00167 00168 for (i=n-1; i>=0; i--) { 00169 sum=b[i]; 00170 for (k=i+1; k<n; k++) sum -= a[n*k + i]*x[k]; 00171 x[i]=sum/p[i]; 00172 } 00173 } 00174 00175 /***************************************************************** 00176 solves the system | -H_x A' | |x_x| = |c_x| 00177 | A H_y| |x_y| |c_y| 00178 00179 with H_x (and H_y) positive (semidefinite) matrices 00180 and n, m the respective sizes of H_x and H_y 00181 00182 for variables see pg. 48 of notebook or do the calculations on a 00183 sheet of paper again 00184 00185 predictor solves the whole thing, corrector assues that H_x didn't 00186 change and relies on the results of the predictor. therefore do 00187 _not_ modify workspace 00188 00189 if you want to speed tune anything in the code here's the right 00190 place to do so: about 95% of the time is being spent in 00191 here. something like an iterative refinement would be nice, 00192 especially when switching from float64_t to single precision. if you 00193 have a fast parallel cholesky use it instead of the numrec 00194 implementations. 00195 00196 side effects: changes H_y (but this is just the unit matrix or zero anyway 00197 in our case) 00198 ***************************************************************/ 00199 00200 bool solve_reduced( 00201 int32_t n, int32_t m, float64_t h_x[], float64_t h_y[], float64_t a[], 00202 float64_t x_x[], float64_t x_y[], float64_t c_x[], float64_t c_y[], 00203 float64_t workspace[], int32_t step) 00204 { 00205 int32_t i,j,k; 00206 00207 float64_t *p_x; 00208 float64_t *p_y; 00209 float64_t *t_a; 00210 float64_t *t_c; 00211 float64_t *t_y; 00212 00213 p_x = workspace; /* together n + m + n*m + n + m = n*(m+2)+2*m */ 00214 p_y = p_x + n; 00215 t_a = p_y + m; 00216 t_c = t_a + n*m; 00217 t_y = t_c + n; 00218 00219 if (step == PREDICTOR) { 00220 if (!choldc(h_x, n, p_x)) /* do cholesky decomposition */ 00221 return false; 00222 00223 for (i=0; i<m; i++) /* forward pass for A' */ 00224 chol_forward(h_x, n, p_x, a+i*n, t_a+i*n); 00225 00226 for (i=0; i<m; i++) /* compute (h_y + a h_x^-1A') */ 00227 for (j=i; j<m; j++) 00228 for (k=0; k<n; k++) 00229 h_y[m*i + j] += t_a[n*j + k] * t_a[n*i + k]; 00230 00231 choldc(h_y, m, p_y); /* and cholesky decomposition */ 00232 } 00233 00234 chol_forward(h_x, n, p_x, c_x, t_c); 00235 /* forward pass for c */ 00236 00237 for (i=0; i<m; i++) { /* and solve for x_y */ 00238 t_y[i] = c_y[i]; 00239 for (j=0; j<n; j++) 00240 t_y[i] += t_a[i*n + j] * t_c[j]; 00241 } 00242 00243 cholsb(h_y, m, p_y, t_y, x_y); 00244 00245 for (i=0; i<n; i++) { /* finally solve for x_x */ 00246 t_c[i] = -t_c[i]; 00247 for (j=0; j<m; j++) 00248 t_c[i] += t_a[j*n + i] * x_y[j]; 00249 } 00250 00251 chol_backward(h_x, n, p_x, t_c, x_x); 00252 return true; 00253 } 00254 00255 /***************************************************************** 00256 matrix vector multiplication (symmetric matrix but only one triangle 00257 given). computes m*x = y 00258 no need to tune it as it's only of O(n^2) but cholesky is of 00259 O(n^3). so don't waste your time _here_ although it isn't very 00260 elegant. 00261 ***************************************************************/ 00262 00263 void matrix_vector(int32_t n, float64_t m[], float64_t x[], float64_t y[]) 00264 { 00265 int32_t i, j; 00266 00267 for (i=0; i<n; i++) { 00268 y[i] = m[(n+1) * i] * x[i]; 00269 00270 for (j=0; j<i; j++) 00271 y[i] += m[i + n*j] * x[j]; 00272 00273 for (j=i+1; j<n; j++) 00274 y[i] += m[n*i + j] * x[j]; 00275 } 00276 } 00277 00278 /***************************************************************** 00279 call only this routine; this is the only one you're interested in 00280 for doing quadratical optimization 00281 00282 the restart feature exists but it may not be of much use due to the 00283 fact that an initial setting, although close but not very close the 00284 the actual solution will result in very good starting diagnostics 00285 (primal and dual feasibility and small infeasibility gap) but incur 00286 later stalling of the optimizer afterwards as we have to enforce 00287 positivity of the slacks. 00288 ***************************************************************/ 00289 00290 int32_t pr_loqo( 00291 int32_t n, int32_t m, float64_t c[], float64_t h_x[], float64_t a[], 00292 float64_t b[], float64_t l[], float64_t u[], float64_t primal[], 00293 float64_t dual[], int32_t verb, float64_t sigfig_max, int32_t counter_max, 00294 float64_t margin, float64_t bound, int32_t restart) 00295 { 00296 /* the knobs to be tuned ... */ 00297 /* float64_t margin = -0.95; we will go up to 95% of the 00298 distance between old variables and zero */ 00299 /* float64_t bound = 10; preset value for the start. small 00300 values give good initial 00301 feasibility but may result in slow 00302 convergence afterwards: we're too 00303 close to zero */ 00304 /* to be allocated */ 00305 float64_t *workspace; 00306 float64_t *diag_h_x; 00307 float64_t *h_y; 00308 float64_t *c_x; 00309 float64_t *c_y; 00310 float64_t *h_dot_x; 00311 float64_t *rho; 00312 float64_t *nu; 00313 float64_t *tau; 00314 float64_t *sigma; 00315 float64_t *gamma_z; 00316 float64_t *gamma_s; 00317 00318 float64_t *hat_nu; 00319 float64_t *hat_tau; 00320 00321 float64_t *delta_x; 00322 float64_t *delta_y; 00323 float64_t *delta_s; 00324 float64_t *delta_z; 00325 float64_t *delta_g; 00326 float64_t *delta_t; 00327 00328 float64_t *d; 00329 00330 /* from the header - pointers into primal and dual */ 00331 float64_t *x; 00332 float64_t *y; 00333 float64_t *g; 00334 float64_t *z; 00335 float64_t *s; 00336 float64_t *t; 00337 00338 /* auxiliary variables */ 00339 float64_t b_plus_1; 00340 float64_t c_plus_1; 00341 00342 float64_t x_h_x; 00343 float64_t primal_inf; 00344 float64_t dual_inf; 00345 00346 float64_t sigfig; 00347 float64_t primal_obj, dual_obj; 00348 float64_t mu; 00349 float64_t alfa=-1; 00350 int32_t counter = 0; 00351 00352 int32_t status = STILL_RUNNING; 00353 00354 int32_t i,j; 00355 00356 /* memory allocation */ 00357 workspace = SG_MALLOC(float64_t, (n*(m+2)+2*m)); 00358 diag_h_x = SG_MALLOC(float64_t, n); 00359 h_y = SG_MALLOC(float64_t, m*m); 00360 c_x = SG_MALLOC(float64_t, n); 00361 c_y = SG_MALLOC(float64_t, m); 00362 h_dot_x = SG_MALLOC(float64_t, n); 00363 00364 rho = SG_MALLOC(float64_t, m); 00365 nu = SG_MALLOC(float64_t, n); 00366 tau = SG_MALLOC(float64_t, n); 00367 sigma = SG_MALLOC(float64_t, n); 00368 00369 gamma_z = SG_MALLOC(float64_t, n); 00370 gamma_s = SG_MALLOC(float64_t, n); 00371 00372 hat_nu = SG_MALLOC(float64_t, n); 00373 hat_tau = SG_MALLOC(float64_t, n); 00374 00375 delta_x = SG_MALLOC(float64_t, n); 00376 delta_y = SG_MALLOC(float64_t, m); 00377 delta_s = SG_MALLOC(float64_t, n); 00378 delta_z = SG_MALLOC(float64_t, n); 00379 delta_g = SG_MALLOC(float64_t, n); 00380 delta_t = SG_MALLOC(float64_t, n); 00381 00382 d = SG_MALLOC(float64_t, n); 00383 00384 /* pointers into the external variables */ 00385 x = primal; /* n */ 00386 g = x + n; /* n */ 00387 t = g + n; /* n */ 00388 00389 y = dual; /* m */ 00390 z = y + m; /* n */ 00391 s = z + n; /* n */ 00392 00393 /* initial settings */ 00394 b_plus_1 = 1; 00395 c_plus_1 = 0; 00396 for (i=0; i<n; i++) c_plus_1 += c[i]; 00397 00398 /* get diagonal terms */ 00399 for (i=0; i<n; i++) diag_h_x[i] = h_x[(n+1)*i]; 00400 00401 /* starting point */ 00402 if (restart == 1) { 00403 /* x, y already preset */ 00404 for (i=0; i<n; i++) { /* compute g, t for primal feasibility */ 00405 g[i] = CMath::max(CMath::abs(x[i] - l[i]), bound); 00406 t[i] = CMath::max(CMath::abs(u[i] - x[i]), bound); 00407 } 00408 00409 matrix_vector(n, h_x, x, h_dot_x); /* h_dot_x = h_x * x */ 00410 00411 for (i=0; i<n; i++) { /* sigma is a dummy variable to calculate z, s */ 00412 sigma[i] = c[i] + h_dot_x[i]; 00413 for (j=0; j<m; j++) 00414 sigma[i] -= a[n*j + i] * y[j]; 00415 00416 if (sigma[i] > 0) { 00417 s[i] = bound; 00418 z[i] = sigma[i] + bound; 00419 } 00420 else { 00421 s[i] = bound - sigma[i]; 00422 z[i] = bound; 00423 } 00424 } 00425 } 00426 else { /* use default start settings */ 00427 for (i=0; i<m; i++) 00428 for (j=i; j<m; j++) 00429 h_y[i*m + j] = (i==j) ? 1 : 0; 00430 00431 for (i=0; i<n; i++) { 00432 c_x[i] = c[i]; 00433 h_x[(n+1)*i] += 1; 00434 } 00435 00436 for (i=0; i<m; i++) 00437 c_y[i] = b[i]; 00438 00439 /* and solve the system [-H_x A'; A H_y] [x, y] = [c_x; c_y] */ 00440 solve_reduced(n, m, h_x, h_y, a, x, y, c_x, c_y, workspace, 00441 PREDICTOR); 00442 00443 /* initialize the other variables */ 00444 for (i=0; i<n; i++) { 00445 g[i] = CMath::max(CMath::abs(x[i] - l[i]), bound); 00446 z[i] = CMath::max(CMath::abs(x[i]), bound); 00447 t[i] = CMath::max(CMath::abs(u[i] - x[i]), bound); 00448 s[i] = CMath::max(CMath::abs(x[i]), bound); 00449 } 00450 } 00451 00452 for (i=0, mu=0; i<n; i++) 00453 mu += z[i] * g[i] + s[i] * t[i]; 00454 mu = mu / (2*n); 00455 00456 /* the main loop */ 00457 if (verb >= STATUS) { 00458 SG_SDEBUG("counter | pri_inf | dual_inf | pri_obj | dual_obj | "); 00459 SG_SDEBUG("sigfig | alpha | nu \n"); 00460 SG_SDEBUG("-------------------------------------------------------"); 00461 SG_SDEBUG("---------------------------\n"); 00462 } 00463 00464 while (status == STILL_RUNNING) { 00465 /* predictor */ 00466 00467 /* put back original diagonal values */ 00468 for (i=0; i<n; i++) 00469 h_x[(n+1) * i] = diag_h_x[i]; 00470 00471 matrix_vector(n, h_x, x, h_dot_x); /* compute h_dot_x = h_x * x */ 00472 00473 for (i=0; i<m; i++) { 00474 rho[i] = b[i]; 00475 for (j=0; j<n; j++) 00476 rho[i] -= a[n*i + j] * x[j]; 00477 } 00478 00479 for (i=0; i<n; i++) { 00480 nu[i] = l[i] - x[i] + g[i]; 00481 tau[i] = u[i] - x[i] - t[i]; 00482 00483 sigma[i] = c[i] - z[i] + s[i] + h_dot_x[i]; 00484 for (j=0; j<m; j++) 00485 sigma[i] -= a[n*j + i] * y[j]; 00486 00487 gamma_z[i] = - z[i]; 00488 gamma_s[i] = - s[i]; 00489 } 00490 00491 /* instrumentation */ 00492 x_h_x = 0; 00493 primal_inf = 0; 00494 dual_inf = 0; 00495 00496 for (i=0; i<n; i++) { 00497 x_h_x += h_dot_x[i] * x[i]; 00498 primal_inf += CMath::sq(tau[i]); 00499 primal_inf += CMath::sq(nu[i]); 00500 dual_inf += CMath::sq(sigma[i]); 00501 } 00502 for (i=0; i<m; i++) 00503 primal_inf += CMath::sq(rho[i]); 00504 primal_inf = sqrt(primal_inf)/b_plus_1; 00505 dual_inf = sqrt(dual_inf)/c_plus_1; 00506 00507 primal_obj = 0.5 * x_h_x; 00508 dual_obj = -0.5 * x_h_x; 00509 for (i=0; i<n; i++) { 00510 primal_obj += c[i] * x[i]; 00511 dual_obj += l[i] * z[i] - u[i] * s[i]; 00512 } 00513 for (i=0; i<m; i++) 00514 dual_obj += b[i] * y[i]; 00515 00516 sigfig = log10(CMath::abs(primal_obj) + 1) - 00517 log10(CMath::abs(primal_obj - dual_obj)); 00518 sigfig = CMath::max(sigfig, 0.0); 00519 00520 /* the diagnostics - after we computed our results we will 00521 analyze them */ 00522 00523 if (counter > counter_max) status = ITERATION_LIMIT; 00524 if (sigfig > sigfig_max) status = OPTIMAL_SOLUTION; 00525 if (primal_inf > 10e100) status = PRIMAL_INFEASIBLE; 00526 if (dual_inf > 10e100) status = DUAL_INFEASIBLE; 00527 if ((primal_inf > 10e100) & (dual_inf > 10e100)) status = PRIMAL_AND_DUAL_INFEASIBLE; 00528 if (CMath::abs(primal_obj) > 10e100) status = PRIMAL_UNBOUNDED; 00529 if (CMath::abs(dual_obj) > 10e100) status = DUAL_UNBOUNDED; 00530 00531 /* write some nice routine to enforce the time limit if you 00532 _really_ want, however it's quite useless as you can compute 00533 the time from the maximum number of iterations as every 00534 iteration costs one cholesky decomposition plus a couple of 00535 backsubstitutions */ 00536 00537 /* generate report */ 00538 if ((verb >= FLOOD) | ((verb == STATUS) & (status != 0))) 00539 SG_SDEBUG("%7i | %.2e | %.2e | % .2e | % .2e | %6.3f | %.4f | %.2e\n", 00540 counter, primal_inf, dual_inf, primal_obj, dual_obj, 00541 sigfig, alfa, mu); 00542 00543 counter++; 00544 00545 if (status == 0) { /* we may keep on going, otherwise 00546 it'll cost one loop extra plus a 00547 messed up main diagonal of h_x */ 00548 /* intermediate variables (the ones with hat) */ 00549 for (i=0; i<n; i++) { 00550 hat_nu[i] = nu[i] + g[i] * gamma_z[i] / z[i]; 00551 hat_tau[i] = tau[i] - t[i] * gamma_s[i] / s[i]; 00552 /* diagonal terms */ 00553 d[i] = z[i] / g[i] + s[i] / t[i]; 00554 } 00555 00556 /* initialization before the cholesky solver */ 00557 for (i=0; i<n; i++) { 00558 h_x[(n+1)*i] = diag_h_x[i] + d[i]; 00559 c_x[i] = sigma[i] - z[i] * hat_nu[i] / g[i] - 00560 s[i] * hat_tau[i] / t[i]; 00561 } 00562 for (i=0; i<m; i++) { 00563 c_y[i] = rho[i]; 00564 for (j=i; j<m; j++) 00565 h_y[m*i + j] = 0; 00566 } 00567 00568 /* and do it */ 00569 if (!solve_reduced(n, m, h_x, h_y, a, delta_x, delta_y, c_x, c_y, workspace, PREDICTOR)) 00570 { 00571 status=INCONSISTENT; 00572 goto exit_optimizer; 00573 } 00574 00575 for (i=0; i<n; i++) { 00576 /* backsubstitution */ 00577 delta_s[i] = s[i] * (delta_x[i] - hat_tau[i]) / t[i]; 00578 delta_z[i] = z[i] * (hat_nu[i] - delta_x[i]) / g[i]; 00579 00580 delta_g[i] = g[i] * (gamma_z[i] - delta_z[i]) / z[i]; 00581 delta_t[i] = t[i] * (gamma_s[i] - delta_s[i]) / s[i]; 00582 00583 /* central path (corrector) */ 00584 gamma_z[i] = mu / g[i] - z[i] - delta_z[i] * delta_g[i] / g[i]; 00585 gamma_s[i] = mu / t[i] - s[i] - delta_s[i] * delta_t[i] / t[i]; 00586 00587 /* (some more intermediate variables) the hat variables */ 00588 hat_nu[i] = nu[i] + g[i] * gamma_z[i] / z[i]; 00589 hat_tau[i] = tau[i] - t[i] * gamma_s[i] / s[i]; 00590 00591 /* initialization before the cholesky */ 00592 c_x[i] = sigma[i] - z[i] * hat_nu[i] / g[i] - s[i] * hat_tau[i] / t[i]; 00593 } 00594 00595 for (i=0; i<m; i++) { /* comput c_y and rho */ 00596 c_y[i] = rho[i]; 00597 for (j=i; j<m; j++) 00598 h_y[m*i + j] = 0; 00599 } 00600 00601 /* and do it */ 00602 solve_reduced(n, m, h_x, h_y, a, delta_x, delta_y, c_x, c_y, workspace, 00603 CORRECTOR); 00604 00605 for (i=0; i<n; i++) { 00606 /* backsubstitution */ 00607 delta_s[i] = s[i] * (delta_x[i] - hat_tau[i]) / t[i]; 00608 delta_z[i] = z[i] * (hat_nu[i] - delta_x[i]) / g[i]; 00609 00610 delta_g[i] = g[i] * (gamma_z[i] - delta_z[i]) / z[i]; 00611 delta_t[i] = t[i] * (gamma_s[i] - delta_s[i]) / s[i]; 00612 } 00613 00614 alfa = -1; 00615 for (i=0; i<n; i++) { 00616 alfa = CMath::min(alfa, delta_g[i]/g[i]); 00617 alfa = CMath::min(alfa, delta_t[i]/t[i]); 00618 alfa = CMath::min(alfa, delta_s[i]/s[i]); 00619 alfa = CMath::min(alfa, delta_z[i]/z[i]); 00620 } 00621 alfa = (margin - 1) / alfa; 00622 00623 /* compute mu */ 00624 for (i=0, mu=0; i<n; i++) 00625 mu += z[i] * g[i] + s[i] * t[i]; 00626 mu = mu / (2*n); 00627 mu = mu * CMath::sq((alfa - 1) / (alfa + 10)); 00628 00629 for (i=0; i<n; i++) { 00630 x[i] += alfa * delta_x[i]; 00631 g[i] += alfa * delta_g[i]; 00632 t[i] += alfa * delta_t[i]; 00633 z[i] += alfa * delta_z[i]; 00634 s[i] += alfa * delta_s[i]; 00635 } 00636 00637 for (i=0; i<m; i++) 00638 y[i] += alfa * delta_y[i]; 00639 } 00640 } 00641 00642 exit_optimizer: 00643 if ((status == 1) && (verb >= STATUS)) { 00644 SG_SDEBUG("----------------------------------------------------------------------------------\n"); 00645 SG_SDEBUG("optimization converged\n"); 00646 } 00647 00648 /* free memory */ 00649 SG_FREE(workspace); 00650 SG_FREE(diag_h_x); 00651 SG_FREE(h_y); 00652 SG_FREE(c_x); 00653 SG_FREE(c_y); 00654 SG_FREE(h_dot_x); 00655 00656 SG_FREE(rho); 00657 SG_FREE(nu); 00658 SG_FREE(tau); 00659 SG_FREE(sigma); 00660 SG_FREE(gamma_z); 00661 SG_FREE(gamma_s); 00662 00663 SG_FREE(hat_nu); 00664 SG_FREE(hat_tau); 00665 00666 SG_FREE(delta_x); 00667 SG_FREE(delta_y); 00668 SG_FREE(delta_s); 00669 SG_FREE(delta_z); 00670 SG_FREE(delta_g); 00671 SG_FREE(delta_t); 00672 00673 SG_FREE(d); 00674 00675 /* and return to sender */ 00676 return status; 00677 } 00678 }