CppAD: A C++ Algorithmic Differentiation Package 20110419
/* $Id$ */
/* --------------------------------------------------------------------------
CppAD: C++ Algorithmic Differentiation: Copyright (C) 2003-10 Bradley M. Bell

CppAD is distributed under multiple licenses. This distribution is under
the terms of the
	Common Public License Version 1.0.

A copy of this license is included in the COPYING file of this distribution.
Please visit http://www.coin-or.org/CppAD/ for information on other licenses.
-------------------------------------------------------------------------- */
# include "cppad_ipopt_nlp.hpp"
# include "sparse_map2vec.hpp"
# include "jac_g_map.hpp"
# include "hes_fg_map.hpp"
# include "vec_fun_pattern.hpp"
# include "fun_record.hpp"

/*!
\file cppad_ipopt_nlp.cpp
\brief Member functions for the cppad_ipopt_nlp class.
*/

/// If 0, tracing is off; otherwise tracing is on.
# define CPPAD_IPOPT_NLP_TRACE 0

# if CPPAD_IPOPT_NLP_TRACE
# include <cstdio>
# endif

/*!
Constructor for the \ref Nonlinear_Programming_Problem.

\param n
dimension of the domain space for f(x) and g(x).

\param m
dimension of the range space for g(x).

\param x_i
initial value of x during the optimization procedure (size n).

\param x_l
lower limit for x (size n).

\param x_u
upper limit for x (size n).

\param g_l
lower limit for g(x) (size m).

\param g_u
upper limit for g(x) (size m).

\param fg_info
pointer to the base class version of a derived class object used to get
information about the user's representation for f(x) and g(x).
(The object pointed to must not be deleted before this cppad_ipopt_nlp object.)

\param solution
pointer to the object where the final results are stored.
(The object pointed to must not be deleted before this cppad_ipopt_nlp object.)

\par Constants
The following values are set by the constructor and are \c const
or effectively \c const; i.e., they are set by the constructor and should
not be changed:
\verbatim
	n_, m_, x_i_, x_l_, x_u_, g_l_, g_u_, K_, L_, p_, q_, retape_,
	pattern_jac_r_, pattern_hes_r_, index_jac_g_, index_hes_fg_,
	nnz_jac_g_, iRow_jac_g_, jCol_jac_g_,
	nnz_h_lag_, iRow_h_lag_, jCol_h_lag_
\endverbatim
In addition, the function calls <tt>fg_info->set_n(n)</tt>
and <tt>fg_info->set_m(m)</tt> are used to set the values of \c n
and \c m in \c fg_info.

\par Variables
The following arrays have a fixed size which is set during this constructor:

\li \c tape_ok_ has size \c K_. It is initialized as true for indices
\c k such that <tt>retape[k]</tt> is false.

\li \c r_fun_ has size \c K_. It is initialized with the default
\c ADFun constructor. Then, for indices \c k such that
<tt>retape[k]</tt> is false, the operation sequence corresponding
to \f$ r_k (u) \f$ is stored in <tt>r_fun_[k]</tt>.

\li \c I_ has size equal to the maximum of <tt>p[k]</tt> w.r.t. \c k.

\li \c J_ has size equal to the maximum of <tt>q[k]</tt> w.r.t. \c k.
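
\par Example
The following is a minimal sketch of a constructor call.
The names \c my_fg_info and \c my_solution are illustrative only; they stand
for a user-defined object of a class derived from \c cppad_ipopt_fg_info
and a \c cppad_ipopt_solution object:
\verbatim
	// n = 2 variables, m = 1 constraint
	size_t n = 2, m = 1;
	NumberVector x_i(n), x_l(n), x_u(n), g_l(m), g_u(m);
	// ... fill x_i, x_l, x_u, g_l, g_u ...
	cppad_ipopt_fg_info*  fg_info  = &my_fg_info;   // user's representation of f, g
	cppad_ipopt_solution* solution = &my_solution;  // filled in by finalize_solution
	cppad_ipopt_nlp cppad_nlp(
		n, m, x_i, x_l, x_u, g_l, g_u, fg_info, solution
	);
\endverbatim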

\par NDEBUG
If the preprocessor symbol \c NDEBUG is not defined,
certain of the assumptions about the function calls of the form
\verbatim
	fg_info->index(k, ell, I, J)
\endverbatim
are checked to make sure that they hold.
*/
cppad_ipopt_nlp::cppad_ipopt_nlp(
	size_t                 n        ,
	size_t                 m        ,
	const NumberVector&    x_i      ,
	const NumberVector&    x_l      ,
	const NumberVector&    x_u      ,
	const NumberVector&    g_l      ,
	const NumberVector&    g_u      ,
	cppad_ipopt_fg_info*   fg_info  ,
	cppad_ipopt_solution*  solution )
	: n_ ( n ),
	  m_ ( m ),
	  x_i_ ( x_i ),
	  x_l_ ( x_l ),
	  x_u_ ( x_u ),
	  g_l_ ( g_l ),
	  g_u_ ( g_u ),
	  fg_info_ ( fg_info ) ,
	  solution_ ( solution )
{	size_t k;

	// set information needed in cppad_ipopt_fg_info
	fg_info_->set_n(n);
	fg_info_->set_m(m);

	// get information from derived class version of fg_info
	K_ = fg_info_->number_functions();
	L_.resize(K_);
	p_.resize(K_);
	q_.resize(K_);
	r_fun_.resize(K_);
	retape_.resize(K_);
	tape_ok_.resize(K_);
	pattern_jac_r_.resize(K_);
	pattern_hes_r_.resize(K_);
	size_t max_p = 0;
	size_t max_q = 0;
	for(k = 0; k < K_; k++)
	{	L_[k]      = fg_info_->number_terms(k);
		p_[k]      = fg_info_->range_size(k);
		q_[k]      = fg_info_->domain_size(k);
		retape_[k] = fg_info_->retape(k);
		max_p      = std::max(max_p, p_[k]);
		max_q      = std::max(max_q, q_[k]);
		pattern_jac_r_[k].resize( p_[k] * q_[k] );
		pattern_hes_r_[k].resize( q_[k] * q_[k] );
	}
	I_.resize(max_p);
	J_.resize(max_q);
# ifndef NDEBUG
	size_t i, j, ell;
	// check for valid range and domain indices
	for(k = 0; k < K_; k++) for(ell = 0; ell < L_[k]; ell++)
	{
		for(i = 0; i < p_[k]; i++)
			I_[i] = m+1; // an invalid range index
		for(j = 0; j < q_[k]; j++)
			J_[j] = n;   // an invalid domain index
		fg_info_->index(k, ell, I_, J_);
		for(i = 0; i < p_[k]; i++) if( I_[i] > m )
		{	std::cerr << "k=" << k << ", ell=" << ell
				<< ", I[" << i << "]=" << I_[i] << std::endl;
			CPPAD_ASSERT_KNOWN( I_[i] <= m,
				"cppad_ipopt_nlp: invalid value in index vector I"
			);
		}
		for(j = 0; j < q_[k]; j++) if( J_[j] >= n )
		{	std::cerr << "k=" << k << ", ell=" << ell
				<< ", J[" << j << "]=" << J_[j] << std::endl;
			CPPAD_ASSERT_KNOWN( J_[j] < n,
				"cppad_ipopt_nlp: invalid value in index vector J"
			);
		}
	}
# endif
	// record r[k] for functions that do not need retaping
	for(k = 0; k < K_; k++)
	{	tape_ok_[k] = false;
		if( ! retape_[k] )
		{	// Operation sequence does not depend on the value
			// of u, so record it once here in the constructor.
			fg_info_->index(k, 0, I_, J_);
			fun_record(
				fg_info_ ,   // inputs
				k        ,
				p_       ,
				q_       ,
				n_       ,
				x_i_     ,
				J_       ,
				r_fun_       // output
			);
			tape_ok_[k] = true;
		}
	}

	// compute a sparsity pattern for each r_k (u)
	vec_fun_pattern(
		K_, p_, q_, retape_, r_fun_,     // inputs
		pattern_jac_r_, pattern_hes_r_   // outputs
	);

	// mapping from (i,j) to the Ipopt sparsity index for the Jacobian of g
	jac_g_map(
		fg_info_, m_, n_, K_, L_, p_, q_, pattern_jac_r_,  // inputs
		I_, J_,                                            // work
		index_jac_g_                                       // outputs
	);

	// mapping from (i,j) to the Ipopt sparsity index for the Hessian of the Lagrangian
	hes_fg_map(
		fg_info_, m_, n_, K_, L_, p_, q_, pattern_hes_r_,  // inputs
		I_, J_,                                            // work
		index_hes_fg_                                      // outputs
	);

	// compute the Ipopt sparsity structure for the Jacobian of g
	sparse_map2vec(
		index_jac_g_,                          // inputs
		nnz_jac_g_, iRow_jac_g_, jCol_jac_g_   // outputs
	);

	// compute the Ipopt sparsity structure for the Hessian of the Lagrangian
	sparse_map2vec(
		index_hes_fg_,                         // inputs
		nnz_h_lag_, iRow_h_lag_, jCol_h_lag_   // outputs
	);
}

/// The destructor takes no special action.
cppad_ipopt_nlp::~cppad_ipopt_nlp()
{}

/*!
Return dimension information about the optimization problem.

\param[out] n
is set to the value \c n_.

\param[out] m
is set to the value \c m_.

\param[out] nnz_jac_g
is set to the value of \c nnz_jac_g_.

\param[out] nnz_h_lag
is set to the value of \c nnz_h_lag_.

\param[out] index_style
is set to C_STYLE; i.e., zero based indexing is used in the
information passed to Ipopt.
*/
bool cppad_ipopt_nlp::get_nlp_info(Index& n, Index& m, Index& nnz_jac_g,
	Index& nnz_h_lag, IndexStyleEnum& index_style)
{
	n         = n_;
	m         = m_;
	nnz_jac_g = nnz_jac_g_;
	nnz_h_lag = nnz_h_lag_;

	// use the C (zero based) index style for row/col entries
	index_style = C_STYLE;

	return true;
}

/*!
Return bound information about the optimization problem.

\param[in] n
is the dimension of the domain space for f(x) and g(x); i.e.,
it must be equal to \c n_.

\param[out] x_l
is a vector of size \c n.
The input value of its elements does not matter.
On output, it is a copy of the lower bound for \f$ x \f$; i.e., \c x_l_.

\param[out] x_u
is a vector of size \c n.
The input value of its elements does not matter.
On output, it is a copy of the upper bound for \f$ x \f$; i.e., \c x_u_.

\param[in] m
is the dimension of the range space for g(x); i.e.,
it must be equal to \c m_.

\param[out] g_l
is a vector of size \c m.
The input value of its elements does not matter.
On output, it is a copy of the lower bound for \f$ g(x) \f$; i.e., \c g_l_.

\param[out] g_u
is a vector of size \c m.
The input value of its elements does not matter.
On output, it is a copy of the upper bound for \f$ g(x) \f$; i.e., \c g_u_.
*/
bool cppad_ipopt_nlp::get_bounds_info(Index n, Number* x_l, Number* x_u,
	Index m, Number* g_l, Number* g_u)
{	size_t i, j;
	// here, the n and m we gave Ipopt in get_nlp_info are passed back
	CPPAD_ASSERT_UNKNOWN(size_t(n) == n_);
	CPPAD_ASSERT_UNKNOWN(size_t(m) == m_);

	// pass back bounds
	for(j = 0; j < n_; j++)
	{	x_l[j] = x_l_[j];
		x_u[j] = x_u_[j];
	}
	for(i = 0; i < m_; i++)
	{	g_l[i] = g_l_[i];
		g_u[i] = g_u_[i];
	}

	return true;
}

/*!
Return the initial x value where the optimization is started.

\param[in] n
must be equal to the domain dimension for f(x) and g(x); i.e.,
it must be equal to \c n_.

\param[in] init_x
must be equal to true.

\param[out] x
is a vector of size \c n.
The input value of its elements does not matter.
On output, it is a copy of the initial value for \f$ x \f$; i.e., \c x_i_.

\param[in] init_z
must be equal to false.

\param z_L
is not used.

\param z_U
is not used.

\param[in] m
must be equal to the range dimension for g(x); i.e.,
it must be equal to \c m_.

\param init_lambda
must be equal to false.

\param lambda
is not used.
*/
bool cppad_ipopt_nlp::get_starting_point(Index n, bool init_x, Number* x,
	bool init_z, Number* z_L, Number* z_U,
	Index m, bool init_lambda,
	Number* lambda)
{	size_t j;

	CPPAD_ASSERT_UNKNOWN(size_t(n) == n_ );
	CPPAD_ASSERT_UNKNOWN(size_t(m) == m_ );
	CPPAD_ASSERT_UNKNOWN(init_x == true);
	CPPAD_ASSERT_UNKNOWN(init_z == false);
	CPPAD_ASSERT_UNKNOWN(init_lambda == false);

	for(j = 0; j < n_; j++)
		x[j] = x_i_[j];

	return true;
}

/*!
Evaluate the objective function f(x).

\param[in] n
is the dimension of the argument space for f(x); i.e., it must be equal to \c n_.

\param[in] x
is a vector of size \c n containing the point at which to evaluate
the function f(x).

\param[in] new_x
is false if the previous call to any one of the
\ref Evaluation_Methods used the same value for \c x.

\param[out] obj_value
is the value of the objective f(x) at this value of \c x.

\return
The return value is always true; see \ref Evaluation_Methods.

\par Efficiency
This routine could be more efficient
(in particular when L[k] > 1 and retape[k] is true)
if the user also provided a version
of the function <tt>fg_info->eval_r(k, u)</tt> where \c u is of type
\c NumberVector.
*/
bool cppad_ipopt_nlp::eval_f(
	Index n, const Number* x, bool new_x, Number& obj_value
)
{
	CPPAD_ASSERT_UNKNOWN(size_t(n) == n_ );

	size_t iobj, j, k, ell;

	// initialize summation
	obj_value = 0.;

	// update tape_ok_ flag
	for(k = 0; k < K_; k++)
	{	if( new_x && retape_[k] )
			tape_ok_[k] = false;
	}

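	// f(x) is represented as a sum of terms; term (k, ell) contributes
	// r_k ( u^{k,ell} ) to the components of fg selected by the index
	// vector I returned by fg_info->index(k, ell, I, J). Only components
	// whose range index I[iobj] is zero (the objective) are summed here.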
	for(k = 0; k < K_; k++) for(ell = 0; ell < L_[k]; ell++)
	{	fg_info_->index(k, ell, I_, J_);
		for(iobj = 0; iobj < p_[k]; iobj++) if( I_[iobj] == 0 )
		{	if( ! tape_ok_[k] )
			{	// Record r_k for the value of u corresponding to x
				fun_record(
					fg_info_ ,   // inputs
					k        ,
					p_       ,
					q_       ,
					n_       ,
					x        ,
					J_       ,
					r_fun_       // output
				);
				if( retape_[k] )
					tape_ok_[k] = L_[k] <= 1;
				else	tape_ok_[k] = true;
			}
			NumberVector u(q_[k]);
			NumberVector r(p_[k]);
			for(j = 0; j < q_[k]; j++)
			{	CPPAD_ASSERT_UNKNOWN( J_[j] < n_ );
				u[j] = x[ J_[j] ];
			}
			r = r_fun_[k].Forward(0, u);
			obj_value += r[iobj];
		}
	}
# if CPPAD_IPOPT_NLP_TRACE
	using std::printf;
	for(j = 0; j < n_; j++)
		printf("cppad_ipopt_nlp::eval_f::x[%d] = %20.14g\n", j, x[j]);
	printf("cppad_ipopt_nlp::eval_f::obj_value = %20.14g\n", obj_value);
# endif
	return true;
}

/*!
Evaluate the gradient of f(x).

\param[in] n
is the dimension of the argument space for f(x); i.e., it must be equal to \c n_.

\param[in] x
is a vector of size \c n containing the point at which to evaluate
the gradient of f(x).

\param[in] new_x
is false if the previous call to any one of the
\ref Evaluation_Methods used the same value for \c x.

\param[out] grad_f
is a vector of size \c n.
The input value of its elements does not matter.
The output value of its elements is the gradient of f(x)
at this value of \c x.

\return
The return value is always true; see \ref Evaluation_Methods.
*/
bool cppad_ipopt_nlp::eval_grad_f(
	Index n, const Number* x, bool new_x, Number* grad_f
)
{	CPPAD_ASSERT_UNKNOWN(size_t(n) == n_ );

	size_t iobj, i, j, k, ell;

	// initialize summation
	for(j = 0; j < n_; j++)
		grad_f[j] = 0.;

	// update tape_ok_ flag
	for(k = 0; k < K_; k++)
	{	if( new_x && retape_[k] )
			tape_ok_[k] = false;
	}

	for(k = 0; k < K_; k++) for(ell = 0; ell < L_[k]; ell++)
	{	fg_info_->index(k, ell, I_, J_);
		for(iobj = 0; iobj < p_[k]; iobj++) if( I_[iobj] == 0 )
		{	if( ! tape_ok_[k] )
			{	// Record r_k for the value of u corresponding to x
				fun_record(
					fg_info_ ,   // inputs
					k        ,
					p_       ,
					q_       ,
					n_       ,
					x        ,
					J_       ,
					r_fun_       // output
				);
				if( retape_[k] )
					tape_ok_[k] = L_[k] <= 1;
				else	tape_ok_[k] = true;
			}
			NumberVector u(q_[k]);
			NumberVector w(p_[k]);
			NumberVector r_grad(q_[k]);
			for(j = 0; j < q_[k]; j++)
			{	CPPAD_ASSERT_UNKNOWN( J_[j] < n_ );
				u[j] = x[ J_[j] ];
			}
			r_fun_[k].Forward(0, u);
			for(i = 0; i < p_[k]; i++)
				w[i] = 0.;
			w[iobj] = 1.;
			r_grad = r_fun_[k].Reverse(1, w);
			for(j = 0; j < q_[k]; j++)
			{	CPPAD_ASSERT_UNKNOWN( J_[j] < n_ );
				grad_f[ J_[j] ] += r_grad[j];
			}
		}
	}
# if CPPAD_IPOPT_NLP_TRACE
	using std::printf;
	for(j = 0; j < n_; j++) printf(
		"cppad_ipopt_nlp::eval_grad_f::x[%d] = %20.14g\n", j, x[j]
	);
	for(j = 0; j < n_; j++) printf(
		"cppad_ipopt_nlp::eval_grad_f::grad_f[%d] = %20.14g\n", j, grad_f[j]
	);
# endif
	return true;
}

/*!
Evaluate the function g(x).

\param[in] n
is the dimension of the argument space for g(x); i.e., it must be equal to \c n_.

\param[in] x
is a vector of size \c n containing the point at which to evaluate
the function g(x).

\param[in] new_x
is false if the previous call to any one of the
\ref Evaluation_Methods used the same value for \c x.

\param[in] m
is the dimension of the range space for g(x); i.e., it must be equal to \c m_.

\param[out] g
is a vector of size \c m.
The input value of its elements does not matter.
The output value of its elements is
the value of the function g(x) at this value of \c x.

\return
The return value is always true; see \ref Evaluation_Methods.
*/
bool cppad_ipopt_nlp::eval_g(
	Index n, const Number* x, bool new_x, Index m, Number* g
)
{	CPPAD_ASSERT_UNKNOWN(size_t(n) == n_ );

	size_t i, j, k, ell;

	// initialize summation
	for(i = 0; i < m_; i++)
		g[i] = 0.;

	// update tape_ok_ flag
	for(k = 0; k < K_; k++)
	{	if( new_x && retape_[k] )
			tape_ok_[k] = false;
	}

	for(k = 0; k < K_; k++) for(ell = 0; ell < L_[k]; ell++)
	{	fg_info_->index(k, ell, I_, J_);
		if( ! tape_ok_[k] )
		{	// Record r_k for the value of u corresponding to x
			fun_record(
				fg_info_ ,   // inputs
				k        ,
				p_       ,
				q_       ,
				n_       ,
				x        ,
				J_       ,
				r_fun_       // output
			);
		}
		if( retape_[k] )
			tape_ok_[k] = L_[k] <= 1;
		else	tape_ok_[k] = true;
		NumberVector u(q_[k]);
		NumberVector r(p_[k]);
		for(j = 0; j < q_[k]; j++)
		{	CPPAD_ASSERT_UNKNOWN( J_[j] < n_ );
			u[j] = x[ J_[j] ];
		}
		r = r_fun_[k].Forward(0, u);
		for(i = 0; i < p_[k]; i++)
		{	CPPAD_ASSERT_UNKNOWN( I_[i] <= m_ );
			if( I_[i] >= 1 )
				g[ I_[i] - 1 ] += r[i];
		}
	}
# if CPPAD_IPOPT_NLP_TRACE
	using std::printf;
	for(j = 0; j < n_; j++)
		printf("cppad_ipopt_nlp::eval_g::x[%d] = %20.14g\n", j, x[j]);
	for(i = 0; i < m_; i++)
		printf("cppad_ipopt_nlp::eval_g::g[%d] = %20.14g\n", i, g[i]);
# endif
	return true;
}

/*!
Evaluate the Jacobian of g(x).

\param[in] n
is the dimension of the argument space for g(x); i.e., it must be equal to \c n_.

\param x
if \c values is not \c NULL,
\c x is a vector of size \c n containing the point at which to evaluate
the Jacobian of g(x).

\param[in] new_x
is false if the previous call to any one of the
\ref Evaluation_Methods used the same value for \c x.

\param[in] m
is the dimension of the range space for g(x); i.e., it must be equal to \c m_.

\param[in] nele_jac
is the number of possibly non-zero elements in the Jacobian of g(x);
i.e., it must be equal to \c nnz_jac_g_.

\param iRow
if \c values is not \c NULL, \c iRow is not defined.
If \c values is \c NULL, \c iRow
is a vector with size \c nele_jac.
The input value of its elements does not matter.
On output, for <tt>k = 0 , ... , nele_jac-1</tt>, <tt>iRow[k]</tt> is the
base zero row index for the
k-th possibly non-zero entry in the Jacobian of g(x).

\param jCol
if \c values is not \c NULL, \c jCol is not defined.
If \c values is \c NULL, \c jCol
is a vector with size \c nele_jac.
The input value of its elements does not matter.
On output, for <tt>k = 0 , ... , nele_jac-1</tt>, <tt>jCol[k]</tt> is the
base zero column index for the
k-th possibly non-zero entry in the Jacobian of g(x).

\param values
if \c values is not \c NULL, \c values
is a vector with size \c nele_jac.
The input value of its elements does not matter.
On output, for <tt>k = 0 , ... , nele_jac-1</tt>, <tt>values[k]</tt> is the
value of the
k-th possibly non-zero entry in the Jacobian of g(x).

\return
The return value is always true; see \ref Evaluation_Methods.
*/
bool cppad_ipopt_nlp::eval_jac_g(Index n, const Number* x, bool new_x,
	Index m, Index nele_jac, Index* iRow, Index *jCol,
	Number* values)
{	CPPAD_ASSERT_UNKNOWN(size_t(m) == m_ );
	CPPAD_ASSERT_UNKNOWN(size_t(n) == n_ );
	CPPAD_ASSERT_UNKNOWN( size_t(nele_jac) == nnz_jac_g_ );

	size_t i, j, k, ell, l;
	std::map<size_t,size_t>::iterator index_ij;

	if (values == NULL)
	{	for(k = 0; k < nnz_jac_g_; k++)
		{	iRow[k] = iRow_jac_g_[k];
			jCol[k] = jCol_jac_g_[k];
		}
		return true;
	}

	// initialize summation
	l = nnz_jac_g_;
	while(l--)
		values[l] = 0.;

	// update tape_ok_ flag
	for(k = 0; k < K_; k++)
	{	if( new_x && retape_[k] )
			tape_ok_[k] = false;
	}

	for(k = 0; k < K_; k++) for(ell = 0; ell < L_[k]; ell++)
	{	fg_info_->index(k, ell, I_, J_);
		if( ! tape_ok_[k] )
		{	// Record r_k for the value of u corresponding to x
			fun_record(
				fg_info_ ,   // inputs
				k        ,
				p_       ,
				q_       ,
				n_       ,
				x        ,
				J_       ,
				r_fun_       // output
			);
		}
		if( retape_[k] )
			tape_ok_[k] = L_[k] <= 1;
		else	tape_ok_[k] = true;
		NumberVector u(q_[k]);
		NumberVector jac_r(p_[k] * q_[k]);
		for(j = 0; j < q_[k]; j++)
		{	CPPAD_ASSERT_UNKNOWN( J_[j] < n_ );
			u[j] = x[ J_[j] ];
		}
		if( retape_[k] )
			jac_r = r_fun_[k].Jacobian(u);
		else	jac_r = r_fun_[k].SparseJacobian(u, pattern_jac_r_[k]);
		for(i = 0; i < p_[k]; i++) if( I_[i] != 0 )
		{	CPPAD_ASSERT_UNKNOWN( I_[i] <= m_ );
			for(j = 0; j < q_[k]; j++)
			{	index_ij = index_jac_g_[I_[i]-1].find(J_[j]);
				if( index_ij != index_jac_g_[I_[i]-1].end() )
				{	l = index_ij->second;
					values[l] += jac_r[i * q_[k] + j];
				}
				else	CPPAD_ASSERT_UNKNOWN(
					jac_r[i * q_[k] + j] == 0.
				);
			}
		}
	}
	return true;
}

/*!
Evaluate the Hessian of the Lagrangian.

\section The_Hessian_of_the_Lagrangian The Hessian of the Lagrangian
The Hessian of the Lagrangian is defined as
\f[
H(x, \sigma, \lambda )
=
\sigma \nabla^2 f(x) + \sum_{i=0}^{m-1} \lambda_i \nabla^2 g(x)_i
\f]

\param[in] n
is the dimension of the argument space for g(x); i.e., it must be equal to \c n_.

\param x
if \c values is not \c NULL, \c x
is a vector of size \c n containing the point at which to evaluate
the Hessian of the Lagrangian.

\param[in] new_x
is false if the previous call to any one of the
\ref Evaluation_Methods used the same value for \c x.

\param[in] obj_factor
the value \f$ \sigma \f$ multiplying the Hessian of
f(x) in the expression for \ref The_Hessian_of_the_Lagrangian.

\param[in] m
is the dimension of the range space for g(x); i.e., it must be equal to \c m_.

\param[in] lambda
if \c values is not \c NULL, \c lambda
is a vector of size \c m specifying the value of \f$ \lambda \f$
in the expression for \ref The_Hessian_of_the_Lagrangian.

\param[in] new_lambda
is false if the previous call to \c eval_h had the same value for
\c lambda and true otherwise.
(Not currently used.)

\param[in] nele_hess
is the number of possibly non-zero elements in the Hessian of the Lagrangian;
i.e., it must be equal to \c nnz_h_lag_.

\param iRow
if \c values is not \c NULL, \c iRow is not defined.
If \c values is \c NULL, \c iRow
is a vector with size \c nele_hess.
The input value of its elements does not matter.
On output, for <tt>k = 0 , ... , nele_hess-1</tt>, <tt>iRow[k]</tt> is the
base zero row index for the
k-th possibly non-zero entry in the Hessian of the Lagrangian.

\param jCol
if \c values is not \c NULL, \c jCol is not defined.
If \c values is \c NULL, \c jCol
is a vector with size \c nele_hess.
The input value of its elements does not matter.
On output, for <tt>k = 0 , ... , nele_hess-1</tt>, <tt>jCol[k]</tt> is the
base zero column index for the
k-th possibly non-zero entry in the Hessian of the Lagrangian.

\param values
if \c values is not \c NULL, it
is a vector with size \c nele_hess.
The input value of its elements does not matter.
On output, for <tt>k = 0 , ... , nele_hess-1</tt>, <tt>values[k]</tt> is the
value of the
k-th possibly non-zero entry in the Hessian of the Lagrangian.

\return
The return value is always true; see \ref Evaluation_Methods.
*/
bool cppad_ipopt_nlp::eval_h(Index n, const Number* x, bool new_x,
	Number obj_factor, Index m, const Number* lambda,
	bool new_lambda, Index nele_hess, Index* iRow,
	Index* jCol, Number* values)
{	CPPAD_ASSERT_UNKNOWN(size_t(m) == m_ );
	CPPAD_ASSERT_UNKNOWN(size_t(n) == n_ );

	size_t i, j, k, ell, l;
	std::map<size_t,size_t>::iterator index_ij;

	if (values == NULL)
	{	for(k = 0; k < nnz_h_lag_; k++)
		{	iRow[k] = iRow_h_lag_[k];
			jCol[k] = jCol_h_lag_[k];
		}
		return true;
	}

	// initialize summation
	l = nnz_h_lag_;
	while(l--)
		values[l] = 0.;

	// update tape_ok_ flag
	for(k = 0; k < K_; k++)
	{	if( new_x && retape_[k] )
			tape_ok_[k] = false;
	}

	for(k = 0; k < K_; k++) for(ell = 0; ell < L_[k]; ell++)
	{	fg_info_->index(k, ell, I_, J_);
		bool in_use = false;
		for(i = 0; i < p_[k]; i++)
		{	if( I_[i] == 0 )
				in_use |= obj_factor > 0.;
			else	in_use |= lambda[ I_[i] - 1 ] > 0;
		}
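		// Skip this term when it cannot affect the Hessian of the
		// Lagrangian; i.e., when it contributes neither to the objective
		// (obj_factor > 0.) nor to a constraint with a positive
		// multiplier lambda[ I_[i] - 1 ].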
		if( in_use )
		{
			if( ! tape_ok_[k] )
			{	// Record r_k for the value of u corresponding to x
				fun_record(
					fg_info_ ,   // inputs
					k        ,
					p_       ,
					q_       ,
					n_       ,
					x        ,
					J_       ,
					r_fun_       // output
				);
				if( retape_[k] )
					tape_ok_[k] = L_[k] <= 1;
				else	tape_ok_[k] = true;
			}
			NumberVector w(p_[k]);
			NumberVector r_hes(q_[k] * q_[k]);
			NumberVector u(q_[k]);
			for(j = 0; j < q_[k]; j++)
			{	CPPAD_ASSERT_UNKNOWN( J_[j] < n_ );
				u[j] = x[ J_[j] ];
			}
			for(i = 0; i < p_[k]; i++)
			{	CPPAD_ASSERT_UNKNOWN( I_[i] <= m_ );
				if( I_[i] == 0 )
					w[i] = obj_factor;
				else	w[i] = lambda[ I_[i] - 1 ];
			}
			if( retape_[k] )
				r_hes = r_fun_[k].Hessian(u, w);
			else	r_hes = r_fun_[k].SparseHessian(
				u, w, pattern_hes_r_[k]
			);
			for(i = 0; i < q_[k]; i++) for(j = 0; j < q_[k]; j++)
				if( J_[j] <= J_[i] )
				{	index_ij = index_hes_fg_[J_[i]].find(J_[j]);
					if( index_ij != index_hes_fg_[J_[i]].end() )
					{	l = index_ij->second;
						values[l] += r_hes[i * q_[k] + j];
					}
					else	CPPAD_ASSERT_UNKNOWN(
						r_hes[i * q_[k] + j] == 0.
					);
				}
		}
	}
	return true;
}

/*!
Pass solution information from Ipopt to the user's solution structure.

\param[in] status
is the Ipopt solution status,
which gets mapped to a corresponding value for
\n
<tt>solution_->status</tt>.

\param[in] n
is the dimension of the domain space for f(x) and g(x); i.e.,
it must be equal to \c n_.

\param[in] x
is a vector with size \c n specifying the final solution.
\n
<tt>solution_->x</tt> is set to be a vector with size \c n
and to have the same element values.

\param[in] z_L
is a vector with size \c n specifying the Lagrange multipliers for the
constraint \f$ x^l \leq x \f$.
\n
<tt>solution_->z_l</tt> is set to be a vector with size \c n
and to have the same element values.

\param[in] z_U
is a vector with size \c n specifying the Lagrange multipliers for the
constraint \f$ x \leq x^u \f$.
\n
<tt>solution_->z_u</tt> is set to be a vector with size \c n
and to have the same element values.

\param[in] m
is the dimension of the range space for g(x); i.e.,
it must be equal to \c m_.

\param[in] g
is a vector with size \c m containing the value of the constraint function
g(x) at the final solution for \c x.
\n
<tt>solution_->g</tt> is set to be a vector with size \c m
and to have the same element values.

\param[in] lambda
is a vector with size \c m specifying the Lagrange multipliers for the
constraints \f$ g^l \leq g(x) \leq g^u \f$.
\n
<tt>solution_->lambda</tt> is set to be a vector with size \c m
and to have the same element values.

\param[in] obj_value
is the value of the objective function f(x) at the final solution for \c x.
\n
<tt>solution_->obj_value</tt> is set to have the same value.

\param[in] ip_data
is unspecified (by Ipopt) and hence not used.

\param[in] ip_cq
is unspecified (by Ipopt) and hence not used.

\par solution_[out]
the pointer \c solution_ , which is equal to the pointer \c solution
in the constructor for \c cppad_ipopt_nlp,
is used to set the output values (see the documentation above).
*/
void cppad_ipopt_nlp::finalize_solution(
	Ipopt::SolverReturn               status    ,
	Index                             n         ,
	const Number*                     x         ,
	const Number*                     z_L       ,
	const Number*                     z_U       ,
	Index                             m         ,
	const Number*                     g         ,
	const Number*                     lambda    ,
	Number                            obj_value ,
	const Ipopt::IpoptData*           ip_data   ,
	Ipopt::IpoptCalculatedQuantities* ip_cq
)
{	size_t i, j;

	CPPAD_ASSERT_UNKNOWN(size_t(n) == n_ );
	CPPAD_ASSERT_UNKNOWN(size_t(m) == m_ );

	switch(status)
	{	// convert status from Ipopt enum to cppad_ipopt_solution enum
		case Ipopt::SUCCESS:
		solution_->status =
			cppad_ipopt_solution::success;
		break;

		case Ipopt::MAXITER_EXCEEDED:
		solution_->status =
			cppad_ipopt_solution::maxiter_exceeded;
		break;

		case Ipopt::STOP_AT_TINY_STEP:
		solution_->status =
			cppad_ipopt_solution::stop_at_tiny_step;
		break;

		case Ipopt::STOP_AT_ACCEPTABLE_POINT:
		solution_->status =
			cppad_ipopt_solution::stop_at_acceptable_point;
		break;

		case Ipopt::LOCAL_INFEASIBILITY:
		solution_->status =
			cppad_ipopt_solution::local_infeasibility;
		break;

		case Ipopt::USER_REQUESTED_STOP:
		solution_->status =
			cppad_ipopt_solution::user_requested_stop;
		break;

		case Ipopt::DIVERGING_ITERATES:
		solution_->status =
			cppad_ipopt_solution::diverging_iterates;
		break;

		case Ipopt::RESTORATION_FAILURE:
		solution_->status =
			cppad_ipopt_solution::restoration_failure;
		break;

		case Ipopt::ERROR_IN_STEP_COMPUTATION:
		solution_->status =
			cppad_ipopt_solution::error_in_step_computation;
		break;

		case Ipopt::INVALID_NUMBER_DETECTED:
		solution_->status =
			cppad_ipopt_solution::invalid_number_detected;
		break;

		case Ipopt::INTERNAL_ERROR:
		solution_->status =
			cppad_ipopt_solution::internal_error;
		break;

		default:
		solution_->status =
			cppad_ipopt_solution::unknown;
	}

	solution_->x.resize(n_);
	solution_->z_l.resize(n_);
	solution_->z_u.resize(n_);
	for(j = 0; j < n_; j++)
	{	solution_->x[j]   = x[j];
		solution_->z_l[j] = z_L[j];
		solution_->z_u[j] = z_U[j];
	}
	solution_->g.resize(m_);
	solution_->lambda.resize(m_);
	for(i = 0; i < m_; i++)
	{	solution_->g[i]      = g[i];
		solution_->lambda[i] = lambda[i];
	}
	solution_->obj_value = obj_value;
	return;
}

// This routine is defined, but not yet used
// (trying to figure out a problem with Ipopt-3.9.1 and dismod4).
bool cppad_ipopt_nlp::intermediate_callback(
	Ipopt::AlgorithmMode              mode                ,
	Index                             iter                ,
	Number                            obj_value           ,
	Number                            inf_pr              ,
	Number                            inf_du              ,
	Number                            mu                  ,
	Number                            d_norm              ,
	Number                            regularization_size ,
	Number                            alpha_du            ,
	Number                            alpha_pr            ,
	Index                             ls_trials           ,
	const Ipopt::IpoptData*           ip_data             ,
	Ipopt::IpoptCalculatedQuantities* ip_cq
)
{
	// std::cout << "intermediate_callback called" << std::endl;
	return true;
}
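
// ---------------------------------------------------------------------------
// The sketch below is illustrative only: it shows how an object of this
// class is typically handed to Ipopt. It assumes the standard Ipopt C++
// interface (IpoptApplicationFactory, Initialize, OptimizeTNLP) and the
// exact header path may differ between Ipopt installations. The names
// fg_info and solution refer to the user objects described in the
// constructor documentation above.
//
//	# include <coin/IpIpoptApplication.hpp>
//
//	Ipopt::SmartPtr<Ipopt::TNLP> cppad_nlp = new cppad_ipopt_nlp(
//		n, m, x_i, x_l, x_u, g_l, g_u, fg_info, solution
//	);
//	Ipopt::SmartPtr<Ipopt::IpoptApplication> app = IpoptApplicationFactory();
//	app->Initialize();
//	app->OptimizeTNLP(cppad_nlp);
//	// the final results are now available in solution->x,
//	// solution->obj_value, solution->status, etc.
// ---------------------------------------------------------------------------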