CppAD: A C++ Algorithmic Differentiation Package  20130102
cppad_ipopt_nlp.cpp
00001 /* $Id$ */
00002 /* --------------------------------------------------------------------------
00003 CppAD: C++ Algorithmic Differentiation: Copyright (C) 2003-12 Bradley M. Bell
00004 
00005 CppAD is distributed under multiple licenses. This distribution is under
00006 the terms of the 
00007                     Eclipse Public License Version 1.0.
00008 
00009 A copy of this license is included in the COPYING file of this distribution.
00010 Please visit http://www.coin-or.org/CppAD/ for information on other licenses.
00011 -------------------------------------------------------------------------- */
00012 # include <limits>
00013 
00014 # include "cppad_ipopt_nlp.hpp"
00015 # include "sparse_map2vec.hpp"
00016 # include "jac_g_map.hpp"
00017 # include "hes_fg_map.hpp"
00018 # include "vec_fun_pattern.hpp"
00019 # include "fun_record.hpp"
00020 
00021 
00022 /// If 0 tracing is off, otherwise tracing is on.
00023 # define  CPPAD_IPOPT_NLP_TRACE 0
00024 
00025 # if CPPAD_IPOPT_NLP_TRACE
00026 # include <cstdio>
00027 # endif
00028 
00029 // ---------------------------------------------------------------------------
00030 namespace cppad_ipopt {
00031 // ---------------------------------------------------------------------------
00032 /*!
00033 \defgroup cppad_ipopt_nlp_cpp cppad_ipopt_nlp.cpp
00034 \{
00035 \file cppad_ipopt_nlp.cpp
00036 \brief Member functions for the cppad_ipopt_nlp class.
00037 */
00038 
00039 
00040 /*! 
00041 Constructor for the \ref Nonlinear_Programming_Problem.
00042 
00043 \param n
00044 dimension of the domain space for f(x) and g(x).
00045 
00046 \param m
00047 dimension of the range space for g(x).
00048 
00049 \param x_i
00050 initial value of x during the optimization procedure (size n).
00051 
00052 \param x_l
00053 lower limit for x (size n).
00054 
00055 \param x_u
00056 upper limit for x (size n).
00057 
00058 \param g_l
00059 lower limit for g(x) (size m).
00060 
00061 \param g_u
00062 upper limit for g(x) (size m).
00063 
00064 \param fg_info 
00065 pointer (of base class type) to the derived class object used to get
00066 information about the user's representation for f(x) and g(x).
00067 (The object pointed to must not be deleted before this cppad_ipopt_nlp object).
00068 
00069 \param solution
00070 pointer to object where final results are stored.
00071 (The object pointed to must not be deleted before this cppad_ipopt_nlp object).
00072 
00073 \par Constants
00074 The following values are set by the constructor and are \c const
00075 or effectively \c const; i.e., they should not be changed after
00076 construction:
00077 \verbatim
00078      n_, m_, x_i_, x_l_, x_u_, g_l_, g_u_, K_, L_, p_, q_, retape_,
00079      pattern_jac_r_, pattern_hes_r_, index_jac_g_, index_hes_fg_,
00080      nnz_jac_g_, iRow_jac_g_, jCol_jac_g_,
00081      nnz_h_lag_, iRow_h_lag_, jCol_h_lag_,
00082 \endverbatim
00083 In addition, the function calls <tt>fg_info->set_n(n)</tt>
00084 and <tt>fg_info->set_m(m)</tt> are used to set the values of \c n
00085 and \c m in \c fg_info. 
00086 
00087 \par Variables
00088 The following arrays have fixed size which is set during this constructor:
00089 
00090 \li \c tape_ok_ has size \c K_. It is initialized as true for indices
00091 \c k such that <tt>retape[k]</tt> is false.  
00092 
00093 \li \c r_fun_ has size \c K_. It is initialized with the default
00094 \c ADFun constructor. Then, for indices \c k such that 
00095 <tt>retape[k]</tt> is false, the operation sequence corresponding
00096 to \f$ r_k (u) \f$ is stored in <tt>r_fun_[k]</tt>.
00097 
00098 \li \c I_ has size equal to the maximum of <tt>p[k]</tt> with respect to \c k.
00099 
00100 \li \c J_ has size equal to the maximum of <tt>q[k]</tt> with respect to \c k.
00101 
00102 \par NDEBUG
00103 If the preprocessor symbol \c NDEBUG is not defined,
00104 certain assumptions about function calls of the form
00105 \verbatim
00106      fg_info->index(k, ell, I, J)
00107 \endverbatim
00108 are checked; namely, that every index placed in \c I is at most \c m and every index placed in \c J is less than \c n.
00109 */
00110 cppad_ipopt_nlp::cppad_ipopt_nlp(
00111      size_t n                         , 
00112      size_t m                         ,
00113      const NumberVector    &x_i       ,
00114      const NumberVector    &x_l       ,
00115      const NumberVector    &x_u       ,
00116      const NumberVector    &g_l       ,
00117      const NumberVector    &g_u       ,
00118      cppad_ipopt_fg_info*   fg_info   ,
00119      cppad_ipopt_solution*  solution )
00120      : n_ ( n ),
00121        m_ ( m ),
00122        x_i_ ( x_i ),
00123        x_l_ ( x_l ),
00124        x_u_ ( x_u ),
00125        g_l_ ( g_l ),
00126        g_u_ ( g_u ),
00127        fg_info_ ( fg_info ) ,
00128        solution_ (solution) ,
00129        infinity_ ( std::numeric_limits<Number>::infinity() )
00130 {    size_t k;
00131 
00132      // set information needed in cppad_ipopt_fg_info
00133      fg_info_->set_n(n);
00134      fg_info_->set_m(m);
00135 
00136      // get information from derived class version of fg_info
00137      K_ = fg_info_->number_functions();
00138      L_.resize(K_);
00139      p_.resize(K_);
00140      q_.resize(K_);
00141      r_fun_.resize(K_);
00142      retape_.resize(K_);
00143      tape_ok_.resize(K_);
00144      pattern_jac_r_.resize(K_);
00145      pattern_hes_r_.resize(K_);
00146      size_t max_p      = 0;
00147      size_t max_q      = 0;
00148      for(k = 0; k < K_; k++)
00149      {    L_[k]       = fg_info_->number_terms(k);
00150           p_[k]       = fg_info_->range_size(k);
00151           q_[k]       = fg_info_->domain_size(k);
00152           retape_[k]  = fg_info_->retape(k);
00153           max_p       = std::max(max_p, p_[k]);
00154           max_q       = std::max(max_q, q_[k]);
00155           pattern_jac_r_[k].resize( p_[k] * q_[k] );
00156           pattern_hes_r_[k].resize( q_[k] * q_[k] );
00157      }
00158      I_.resize(max_p);
00159      J_.resize(max_q);
00160 # ifndef NDEBUG
00161      size_t i, j, ell;
00162      // check for valid range and domain indices
00163      for(k = 0; k < K_; k++) for(ell = 0; ell < L_[k]; ell++)
00164      {
00165           for( i = 0; i < p_[k]; i++)
00166                I_[i] = m+1; // an invalid range index
00167           for( j = 0; j < q_[k]; j++)
00168                J_[j] = n; // an invalid domain index
00169           fg_info_->index(k, ell, I_, J_);   
00170           for( i = 0; i < p_[k]; i++) if( I_[i] > m )
00171           {    std::cerr << "k=" << k << ", ell=" << ell 
00172                << ", I[" << i << "]=" << I_[i] << std::endl;
00173                CPPAD_ASSERT_KNOWN( I_[i] <= m,
00174                "cppad_ipopt_nlp: invalid value in index vector I"
00175                );
00176           }
00177           for( j = 0; j < q_[k]; j++) if( J_[j] >= n )
00178           {    std::cerr << "k=" << k << ", ell=" << ell 
00179                << ", J[" << j << "]=" << J_[j] << std::endl;
00180                CPPAD_ASSERT_KNOWN( J_[j] < n,
00181                "cppad_ipopt_nlp: invalid value in index vector J"
00182                );
00183           }
00184      }
00185 # endif
00186      // record r[k] for functions that do not need retaping
00187      for(k = 0; k < K_; k++)
00188      {    tape_ok_[k] = false;
00189           if( ! retape_[k] )
00190           {    // Operation sequence does not depend on value 
00191                // of u so record it once here in the constructor.
00192                fg_info_->index(k, 0, I_, J_);
00193                fun_record(
00194                     fg_info_        ,   // inputs
00195                     k               ,
00196                     p_              ,
00197                     q_              ,
00198                     n_              ,
00199                     x_i_            ,
00200                     J_              ,
00201                     r_fun_              // output
00202                );
00203                // take time to optimize because only recording once
00204                r_fun_[k].optimize();
00205                // ok and will stay that way
00206                tape_ok_[k] = true;
00207           }
00208      }
00209 
00210      // compute a sparsity pattern for each r_k (u)
00211      vec_fun_pattern(
00212           K_, p_, q_, retape_, r_fun_,      // inputs 
00213           pattern_jac_r_, pattern_hes_r_    // outputs
00214      );
00215 
00216      // mapping from (i,j) to Ipopt sparsity index for Jacobian of g
00217      jac_g_map(
00218           fg_info_, m_, n_, K_, L_, p_, q_, pattern_jac_r_,   // inputs
00219           I_, J_,                                             // work
00220           index_jac_g_                                        // outputs
00221      );
00222 
00223      // mapping from (i,j) to Ipopt sparsity index for Hessian of Lagrangian
00224      hes_fg_map(
00225           fg_info_, m_, n_, K_, L_, p_, q_, pattern_hes_r_,   // inputs
00226           I_, J_,                                             // work
00227           index_hes_fg_                                       // outputs
00228      );
00229      
00230      // Compute Ipopt sparsity structure for Jacobian of g 
00231      sparse_map2vec(
00232           index_jac_g_,                         // inputs
00233           nnz_jac_g_, iRow_jac_g_, jCol_jac_g_  // outputs
00234      );
00235 
00236      // Compute Ipopt sparsity structure for Hessian of Lagrangian
00237      sparse_map2vec(
00238           index_hes_fg_,                        // inputs
00239           nnz_h_lag_, iRow_h_lag_, jCol_h_lag_  // outputs
00240      );
00241 }
00242 
00243 /// The destructor takes no special action.
00244 cppad_ipopt_nlp::~cppad_ipopt_nlp()
00245 {}
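
/* Example usage (a minimal sketch, not part of this source file):
the derived class name my_fg_info, the problem sizes, and the bounds below
are hypothetical; only the cppad_ipopt_nlp constructor call follows the
signature defined above.  The sketch assumes the simple representation in
which the cppad_ipopt_fg_info derived class overrides only eval_r and
retape, so that r_0 (x) = [ f(x) , g(x) ] and the other virtual member
functions keep their default meanings.

    class my_fg_info : public cppad_ipopt_fg_info {
    public:
        // r_0 (x) = [ f(x) , g_0 (x) ]
        ADVector eval_r(size_t k, const ADVector& x)
        {   ADVector fg(2);
            fg[0] = x[0] * x[0] + x[1];   // f(x)
            fg[1] = x[0] + x[1];          // g_0 (x)
            return fg;
        }
        // the operation sequence above does not depend on x
        bool retape(size_t k)
        {   return false; }
    };

    void solve(void)
    {   size_t n = 2, m = 1;
        NumberVector x_i(n), x_l(n), x_u(n), g_l(m), g_u(m);
        // ... set the initial point and the bounds ...
        my_fg_info           fg_info;
        cppad_ipopt_solution solution;
        Ipopt::SmartPtr<Ipopt::TNLP> nlp = new cppad_ipopt_nlp(
            n, m, x_i, x_l, x_u, g_l, g_u, &fg_info, &solution
        );
        Ipopt::SmartPtr<Ipopt::IpoptApplication> app =
            new Ipopt::IpoptApplication();
        app->Initialize();
        app->OptimizeTNLP(nlp);
        // final results are in solution.status, solution.x,
        // solution.obj_value, ...
    }
*/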
00246 
00247 /*!
00248 Return dimension information about optimization problem.
00249 
00250 \param[out] n
00251 is set to the value \c n_.
00252 
00253 \param[out] m
00254 is set to the value \c m_.
00255 
00256 \param[out] nnz_jac_g
00257 is set to the value of \c nnz_jac_g_.
00258 
00259 \param[out] nnz_h_lag
00260 is set to the value of \c nnz_h_lag_.
00261 
00262 \param[out] index_style
00263 is set to C_STYLE; i.e., zero based indexing is used in the
00264 information passed to Ipopt.
00265 */
00266 bool cppad_ipopt_nlp::get_nlp_info(Index& n, Index& m, Index& nnz_jac_g,
00267                          Index& nnz_h_lag, IndexStyleEnum& index_style)
00268 {
00269      n = n_;
00270      m = m_;
00271      nnz_jac_g = nnz_jac_g_;
00272      nnz_h_lag = nnz_h_lag_;
00273 
00274      // use the C index style (zero based) for row/col entries
00275      index_style = C_STYLE;
00276 
00277      return true;
00278 }
00279 
00280 /*!
00281 Return bound information about optimization problem.
00282 
00283 \param[in] n
00284 is the dimension of the domain space for f(x) and g(x); i.e.,
00285 it must be equal to \c n_.
00286 
00287 \param[out] x_l
00288 is a vector of size \c n.
00289 The input value of its elements does not matter.
00290 On output, it is a copy of the lower bound for \f$ x \f$; i.e.,
00291 \c x_l_.
00292 
00293 \param[out] x_u
00294 is a vector of size \c n.
00295 The input value of its elements does not matter.
00296 On output, it is a copy of the upper bound for \f$ x \f$; i.e.,
00297 \c x_u_.
00298 
00299 \param[in] m
00300 is the dimension of the range space for g(x); i.e.,
00301 it must be equal to \c m_.
00302 
00303 \param[out] g_l
00304 is a vector of size \c m.
00305 The input value of its elements does not matter.
00306 On output, it is a copy of the lower bound for \f$ g(x) \f$; i.e., \c g_l_.
00307 
00308 \param[out] g_u
00309 is a vector of size \c m.
00310 The input value of its elements does not matter.
00311 On output, it is a copy of the upper bound for \f$ g(x) \f$; i.e., \c g_u_.
00312 */
00313 bool cppad_ipopt_nlp::get_bounds_info(Index n, Number* x_l, Number* x_u,
00314                             Index m, Number* g_l, Number* g_u)
00315 {    size_t i, j;
00316      // here, the n and m we gave IPOPT in get_nlp_info are passed back 
00317      CPPAD_ASSERT_UNKNOWN(size_t(n) == n_);
00318      CPPAD_ASSERT_UNKNOWN(size_t(m) == m_);
00319 
00320      // pass back bounds
00321      for(j = 0; j < n_; j++)
00322      {    x_l[j] = x_l_[j];
00323           x_u[j] = x_u_[j];
00324      }
00325      for(i = 0; i < m_; i++)
00326      {    g_l[i] = g_l_[i];
00327           g_u[i] = g_u_[i];
00328      }
00329      
00330      return true;
00331 }
00332 
00333 /*!
00334 Return the initial x value at which the optimization is started.
00335 
00336 \param[in] n
00337 must be equal to the domain dimension for f(x) and g(x); i.e.,
00338 it must be equal to \c n_.
00339 
00340 \param[in] init_x
00341 must be equal to true.
00342 
00343 \param[out] x
00344 is a vector of size \c n.
00345 The input value of its elements does not matter.
00346 On output, it is a copy of the initial value for \f$ x \f$; i.e. \c x_i_.
00347 
00348 \param[in] init_z
00349 must be equal to false.
00350 
00351 \param z_L
00352 is not used.
00353 
00354 \param z_U
00355 is not used.
00356 
00357 \param[in] m
00358 must be equal to the range dimension for g(x); i.e.,
00359 it must be equal to \c m_.
00360 
00361 \param init_lambda
00362 must be equal to false.
00363 
00364 \param lambda
00365 is not used.
00366 */
00367 bool cppad_ipopt_nlp::get_starting_point(Index n, bool init_x, Number* x,
00368                                bool init_z, Number* z_L, Number* z_U,
00369                                Index m, bool init_lambda,
00370                                Number* lambda)
00371 {    size_t j;
00372 
00373      CPPAD_ASSERT_UNKNOWN(size_t(n) == n_ );
00374      CPPAD_ASSERT_UNKNOWN(size_t(m) == m_ );
00375      CPPAD_ASSERT_UNKNOWN(init_x == true);
00376      CPPAD_ASSERT_UNKNOWN(init_z == false);
00377      CPPAD_ASSERT_UNKNOWN(init_lambda == false);
00378 
00379      for(j = 0; j < n_; j++)
00380           x[j] = x_i_[j];
00381 
00382      return true;
00383 }
00384 
00385 /*!
00386 Evaluate the objective function f(x).
00387 
00388 \param[in] n
00389 is the dimension of the argument space for f(x); i.e., must be equal to \c n_.
00390 
00391 \param[in] x
00392 is a vector of size \c n containing the point at which to evaluate
00393 the function f(x).
00394 
00395 \param[in] new_x
00396 is false if the previous call to any one of the 
00397 \ref Evaluation_Methods used the same value for \c x.
00398 
00399 \param[out] obj_value
00400 is the value of the objective f(x) at this value of \c x.
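
\par Representation
In terms of the notation used in the constructor documentation, the loops
below compute (this is a restatement of the code, not an independent
definition)
\f[
	f(x) = \sum_{k=0}^{K-1} \sum_{\ell=0}^{L_k - 1}
	\sum_{ \{ i \, : \, I_{k,\ell}(i) = 0 \} } r_k ( u^{k,\ell} )_i
	\; , \qquad
	u^{k,\ell}_j = x_{ J_{k,\ell}(j) }
\f]
where \f$ I_{k,\ell} \f$ and \f$ J_{k,\ell} \f$ are the index vectors
returned by <tt>fg_info->index(k, ell, I, J)</tt>.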
00401 
00402 \return
00403 The return value is always true; see \ref Evaluation_Methods.
00404 
00405 \par Efficiency
00406 This routine could be more efficient
00407 (in particular, when L[k] > 1 and retape[k] is true)
00408 if the user also provided a version
00409 of the function <tt>fg_info->eval_r(k, u)</tt> where \c u has type
00410 \c NumberVector.
00411 */
00412 bool cppad_ipopt_nlp::eval_f(
00413      Index n, const Number* x, bool new_x, Number& obj_value
00414 )
00415 {
00416      CPPAD_ASSERT_UNKNOWN(size_t(n) == n_ );
00417 
00418      size_t iobj, j, k, ell;
00419 
00420      // initialize summation
00421      obj_value = 0.;
00422 
00423      // update tape_ok_ flag
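     // A tape in r_fun_[k] recorded at a previous argument is only valid
     // when the operation sequence does not depend on u. If retape_[k] is
     // true, the tape must be re-recorded whenever x changes (new_x) and,
     // when L_[k] > 1, once for every term ell, because u = x[ J ] differs
     // between terms even at the same x.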
00424      for(k = 0; k < K_; k++) 
00425      {    if( retape_[k] && (new_x || L_[k] > 1) )
00426                tape_ok_[k] = false;
00427      }
00428 
00429      for(k = 0; k < K_; k++) for(ell = 0; ell < L_[k]; ell++)
00430      {    fg_info_->index(k, ell, I_, J_);
00431           for(iobj = 0; iobj < p_[k]; iobj++) if( I_[iobj] == 0 )
00432           {    if( ! tape_ok_[k] )
00433                {    // Record r_k for value of u corresponding to x
00434                     fun_record(
00435                          fg_info_        ,   // inputs
00436                          k               ,
00437                          p_              ,
00438                          q_              ,
00439                          n_              ,
00440                          x               ,
00441                          J_              ,
00442                          r_fun_             // output
00443                     );
00444                     tape_ok_[k] = ! (retape_[k] || L_[k] > 1);
00445                }
00446                NumberVector u(q_[k]);
00447                NumberVector r(p_[k]);
00448                for(j = 0; j < q_[k]; j++)
00449                {    CPPAD_ASSERT_UNKNOWN( J_[j] < n_ );
00450                     u[j]   = x[ J_[j] ];
00451                }
00452                r          = r_fun_[k].Forward(0, u);
00453                obj_value += r[iobj];
00454           }
00455      }
00456 # if CPPAD_IPOPT_NLP_TRACE
00457      using std::printf;
00458      for(j = 0; j < n_; j++)
00459           printf("cppad_ipopt_nlp::eval_f::x[%u] = %20.14g\n", (unsigned) j, x[j]);
00460      printf("cppad_ipopt_nlp::eval_f::obj_value = %20.14g\n", obj_value);
00461 # endif
00462 # ifndef NDEBUG
00463      CPPAD_ASSERT_KNOWN(
00464           (-infinity_ < obj_value) && (obj_value < infinity_),
00465           "cppad_ipopt_nlp::eval_f:: objective value is not finite"
00466      );
00467 # endif
00468      return true;
00469 }
00470 
00471 /*!
00472 Evaluate the gradient of f(x).
00473 
00474 \param[in] n
00475 is the dimension of the argument space for f(x); i.e., must be equal to \c n_.
00476 
00477 \param[in] x
00478 is a vector of size \c n containing the point at which to evaluate
00479 the gradient of f(x).
00480 
00481 \param[in] new_x
00482 is false if the previous call to any one of the 
00483 \ref Evaluation_Methods used the same value for \c x.
00484 
00485 \param[out] grad_f
00486 is a vector of size \c n.
00487 The input value of its elements does not matter.
00488 The output value of its elements is the gradient of f(x) 
00489 at this value of \c x.
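
\par Representation
In the same notation as the constructor documentation, each partial
derivative \f$ \partial r_k ( u^{k,\ell} )_i / \partial u_j \f$ with
\f$ I_{k,\ell}(i) = 0 \f$ is added to <tt>grad_f[ J[j] ]</tt> by the loops
below; it is computed with one zero order forward sweep followed by a
first order reverse sweep of <tt>r_fun_[k]</tt>.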
00490 
00491 \return
00492 The return value is always true; see \ref Evaluation_Methods.
00493 */
00494 bool cppad_ipopt_nlp::eval_grad_f(
00495      Index n, const Number* x, bool new_x, Number* grad_f
00496 )
00497 {    CPPAD_ASSERT_UNKNOWN(size_t(n) == n_ );
00498 
00499      size_t iobj, i, j, k, ell;
00500 
00501      // initialize summation
00502      for(j = 0; j < n_; j++)
00503           grad_f[j] = 0.;
00504 
00505      // update tape_ok_ flag
00506      for(k = 0; k < K_; k++) 
00507      {    if( retape_[k] && (new_x || L_[k] > 1) )
00508                tape_ok_[k] = false;
00509      }
00510 
00511      for(k = 0; k < K_; k++) for(ell = 0; ell < L_[k]; ell++)
00512      {    fg_info_->index(k, ell, I_, J_);
00513           for(iobj = 0; iobj < p_[k]; iobj++) if( I_[iobj] == 0 )
00514           {    if( ! tape_ok_[k] )
00515                {    // Record r_k for value of u corresponding to x
00516                     fun_record(
00517                          fg_info_        ,   // inputs
00518                          k               ,
00519                          p_              ,
00520                          q_              ,
00521                          n_              ,
00522                          x               ,
00523                          J_              ,
00524                          r_fun_              // output
00525                     );
00526                     tape_ok_[k] = ! (retape_[k] || L_[k] > 1);
00527                }
00528                NumberVector u(q_[k]);
00529                NumberVector w(p_[k]);
00530                NumberVector r_grad(q_[k]);
00531                for(j = 0; j < q_[k]; j++)
00532                {    CPPAD_ASSERT_UNKNOWN( J_[j] < n_ );
00533                     u[j]   = x[ J_[j] ];
00534                }
00535                r_fun_[k].Forward(0, u);
00536                for(i = 0; i < p_[k]; i++)
00537                     w[i] = 0.;
00538                w[iobj]    = 1.;
00539                r_grad     = r_fun_[k].Reverse(1, w);
00540                for(j = 0; j < q_[k]; j++)
00541                {    CPPAD_ASSERT_UNKNOWN( J_[j] < n_ );
00542                     grad_f[ J_[j] ]  += r_grad[j];
00543                }
00544           }
00545      }
00546 # if CPPAD_IPOPT_NLP_TRACE
00547      using std::printf;
00548      for(j = 0; j < n_; j++) printf(
00549      "cppad_ipopt_nlp::eval_grad_f::x[%u] = %20.14g\n", (unsigned) j, x[j]
00550      );
00551      for(j = 0; j < n_; j++) printf(
00552      "cppad_ipopt_nlp::eval_grad_f::grad_f[%u] = %20.14g\n", (unsigned) j, grad_f[j]
00553      );
00554 # endif
00555 # ifndef NDEBUG
00556      for(j = 0; j < n_; j++) CPPAD_ASSERT_KNOWN(
00557           (-infinity_ < grad_f[j]) && (grad_f[j] < infinity_),
00558           "cppad_ipopt_nlp::eval_grad_f:: gradient of objective is not finite"
00559      );
00560 # endif
00561      return true;
00562 }
00563 
00564 /*!
00565 Evaluate the function g(x).
00566 
00567 \param[in] n
00568 is the dimension of the argument space for g(x); i.e., must be equal to \c n_.
00569 
00570 \param[in] x
00571 is a vector of size \c n containing the point at which to evaluate
00572 the constraint function g(x).
00573 
00574 \param[in] new_x
00575 is false if the previous call to any one of the 
00576 \ref Evaluation_Methods used the same value for \c x.
00577 
00578 \param[in] m
00579 is the dimension of the range space for g(x); i.e., must be equal to \c m_.
00580 
00581 \param[out] g
00582 is a vector of size \c m.
00583 The input value of its elements does not matter.
00584 The output value of its elements is 
00585 the value of the function g(x) at this value of \c x.
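
\par Representation
In the same notation as the constructor documentation
(a restatement of the loops below; the shift by one occurs because
index zero in \f$ I \f$ refers to the objective),
\f[
	g(x)_{i-1} = \sum_{k=0}^{K-1} \sum_{\ell=0}^{L_k - 1}
	\sum_{ \{ p \, : \, I_{k,\ell}(p) = i \} } r_k ( u^{k,\ell} )_p
	\; , \qquad
	i = 1 , \ldots , m ,
\f]
where \f$ u^{k,\ell}_j = x_{ J_{k,\ell}(j) } \f$.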
00586 
00587 \return
00588 The return value is always true; see \ref Evaluation_Methods.
00589 */
00590 bool cppad_ipopt_nlp::eval_g(
00591      Index n, const Number* x, bool new_x, Index m, Number* g
00592 )
00593 {    CPPAD_ASSERT_UNKNOWN(size_t(n) == n_ );
00594 
00595      size_t i, j, k, ell;
00596 
00597      // initialize summation
00598      for(i = 0; i < m_; i++)
00599           g[i] = 0.;
00600 
00601      // update tape_ok_ flag
00602      for(k = 0; k < K_; k++) 
00603      {    if( retape_[k] && (new_x || L_[k] > 1) )
00604                tape_ok_[k] = false;
00605      }
00606 
00607      for(k = 0; k < K_; k++) for(ell = 0; ell < L_[k]; ell++)
00608      {    fg_info_->index(k, ell, I_, J_);
00609           if( ! tape_ok_[k] )
00610           {    // Record r_k for value of u corresponding to x
00611                fun_record(
00612                     fg_info_        ,   // inputs
00613                     k               ,
00614                     p_              ,
00615                     q_              ,
00616                     n_              ,
00617                     x               ,
00618                     J_              ,
00619                     r_fun_              // output
00620                );
00621           }
00622           tape_ok_[k] = ! (retape_[k] || L_[k] > 1);
00623           NumberVector u(q_[k]);
00624           NumberVector r(p_[k]);
00625           for(j = 0; j < q_[k]; j++)
00626           {    CPPAD_ASSERT_UNKNOWN( J_[j] < n_ );
00627                u[j]   = x[ J_[j] ];
00628           }
00629           r   = r_fun_[k].Forward(0, u);
00630           for(i = 0; i < p_[k]; i++)
00631           {    CPPAD_ASSERT_UNKNOWN( I_[i] <= m_ );
00632                if( I_[i] >= 1 )
00633                     g[ I_[i] - 1 ] += r[i];
00634           }
00635      }
00636 # if CPPAD_IPOPT_NLP_TRACE
00637      using std::printf;
00638      for(j = 0; j < n_; j++)
00639           printf("cppad_ipopt_nlp::eval_g::x[%u] = %20.14g\n", (unsigned) j, x[j]);
00640      for(i = 0; i < m_; i++)
00641           printf("cppad_ipopt_nlp::eval_g::g[%u] = %20.14g\n", (unsigned) i, g[i]);
00642 # endif
00643 # ifndef NDEBUG
00644      for(i = 0; i < m_; i++) CPPAD_ASSERT_KNOWN(
00645           (-infinity_ < g[i]) && (g[i] < infinity_),
00646           "cppad_ipopt_nlp::eval_g:: a component of g(x) is not finite"
00647      );
00648 # endif
00649      return true;
00650 }
00651 
00652 /*!
00653 Evaluate the Jacobian of g(x).
00654 
00655 \param[in] n
00656 is the dimension of the argument space for g(x); i.e., must be equal to \c n_.
00657 
00658 \param x
00659 if \c values is not \c NULL,
00660 \c x is a vector of size \c n containing the point at which to evaluate
00661 the Jacobian of g(x).
00662 
00663 \param[in] new_x
00664 is false if the previous call to any one of the 
00665 \ref Evaluation_Methods used the same value for \c x.
00666 
00667 \param[in] m
00668 is the dimension of the range space for g(x); i.e., must be equal to \c m_.
00669 
00670 \param[in] nele_jac
00671 is the number of possibly non-zero elements in the Jacobian of g(x);
00672 i.e., must be equal to \c nnz_jac_g_.
00673 
00674 \param iRow
00675 if \c values is not \c NULL, \c iRow is not defined.
00676 if \c values is \c NULL, \c iRow
00677 is a vector with size \c nele_jac.
00678 The input value of its elements does not matter.
00679 On output, 
00680 For <tt>k = 0 , ... , nele_jac-1, iRow[k]</tt> is the 
00681 base zero row index for the 
00682 k-th possibly non-zero entry in the Jacobian of g(x).
00683 
00684 \param jCol
00685 if \c values is not \c NULL, \c jCol is not defined.
00686 if \c values is \c NULL, \c jCol
00687 is a vector with size \c nele_jac.
00688 The input value of its elements does not matter.
00689 On output, 
00690 For <tt>k = 0 , ... , nele_jac-1, jCol[k]</tt> is the 
00691 base zero column index for the 
00692 k-th possibly non-zero entry in the Jacobian of g(x).
00693 
00694 \param values
00695 if \c values is not \c NULL, \c values
00696 is a vector with size \c nele_jac.
00697 The input value of its elements does not matter.
00698 On output, 
00699 For <tt>k = 0 , ... , nele_jac-1, values[k]</tt> is the 
00700 value for the 
00701 k-th possibly non-zero entry in the Jacobian of g(x).
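
\par Representation
In the loops below, each partial derivative
\f$ \partial r_k ( u^{k,\ell} )_p / \partial u_j \f$ with
\f$ I_{k,\ell}(p) = i \geq 1 \f$ is added to the Jacobian entry with row
index \f$ i - 1 \f$ and column index \f$ J_{k,\ell}(j) \f$; the map
\c index_jac_g_ (computed by the constructor) converts each such
(row, column) pair to its position in the \c values vector.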
00702 
00703 \return
00704 The return value is always true; see \ref Evaluation_Methods.
00705 */
00706 bool cppad_ipopt_nlp::eval_jac_g(Index n, const Number* x, bool new_x,
00707                        Index m, Index nele_jac, Index* iRow, Index *jCol,
00708                        Number* values)
00709 {    CPPAD_ASSERT_UNKNOWN(size_t(m)          == m_ );
00710      CPPAD_ASSERT_UNKNOWN(size_t(n)          == n_ );
00711      CPPAD_ASSERT_UNKNOWN( size_t(nele_jac)  == nnz_jac_g_ );
00712 
00713      size_t i, j, k, ell, l;
00714      std::map<size_t,size_t>::iterator index_ij;
00715 
00716 
00717      if (values == NULL) 
00718      {    for(k = 0; k < nnz_jac_g_; k++)
00719           {    iRow[k] = iRow_jac_g_[k];
00720                jCol[k] = jCol_jac_g_[k];
00721           }
00722           return true;
00723      }
00724 
00725      // initialize summation
00726      l = nnz_jac_g_;
00727      while(l--)
00728           values[l] = 0.;
00729 
00730      // update tape_ok_ flag
00731      for(k = 0; k < K_; k++) 
00732      {    if( retape_[k] && (new_x || L_[k] > 1) )
00733                tape_ok_[k] = false;
00734      }
00735 
00736      for(k = 0; k < K_; k++) for(ell = 0; ell < L_[k]; ell++)
00737      {    fg_info_->index(k, ell, I_, J_);
00738           if( ! tape_ok_[k] )
00739           {    // Record r_k for value of u corresponding to x
00740                fun_record(
00741                     fg_info_        ,   // inputs
00742                     k               ,
00743                     p_              ,
00744                     q_              ,
00745                     n_              ,
00746                     x               ,
00747                     J_              ,
00748                     r_fun_              // output
00749                );
00750           }
00751           tape_ok_[k] = ! (retape_[k] || L_[k] > 1);
00752           NumberVector u(q_[k]);
00753           NumberVector jac_r(p_[k] * q_[k]);
00754           for(j = 0; j < q_[k]; j++)
00755           {    CPPAD_ASSERT_UNKNOWN( J_[j] < n_ );
00756                u[j]   = x[ J_[j] ];
00757           }
00758           if( retape_[k] )
00759                jac_r = r_fun_[k].Jacobian(u);
00760           else jac_r = r_fun_[k].SparseJacobian(u, pattern_jac_r_[k]);
00761           for(i = 0; i < p_[k]; i++) if( I_[i] != 0 )
00762           {    CPPAD_ASSERT_UNKNOWN( I_[i] <= m_ );
00763                for(j = 0; j < q_[k]; j++)
00764                {    index_ij = index_jac_g_[I_[i]-1].find(J_[j]);
00765                     if( index_ij != index_jac_g_[I_[i]-1].end() )
00766                     {    l          = index_ij->second;
00767                          values[l] += jac_r[i * q_[k] + j];
00768                     }
00769                     else CPPAD_ASSERT_UNKNOWN(
00770                          jac_r[i * q_[k] + j] == 0.
00771                     );
00772                }
00773           }
00774      }
00775 # ifndef NDEBUG
00776      for(l = 0; l < nnz_jac_g_; l++) CPPAD_ASSERT_KNOWN(
00777           (-infinity_ < values[l]) && (values[l] < infinity_),
00778           "cppad_ipopt_nlp::eval_jac_g:: a component of "
00779           "gradient of g is not finite"
00780      );
00781 # endif
00782 
00783      return true;
00784 }
00785 
00786 /*!
00787 Evaluate the Hessian of the Lagrangian.
00788 
00789 \section The_Hessian_of_the_Lagrangian The Hessian of the Lagrangian
00790 The Hessian of the Lagrangian is defined as
00791 \f[
00792 H(x, \sigma, \lambda ) 
00793 =
00794 \sigma \nabla^2 f(x) + \sum_{i=0}^{m-1} \lambda_i \nabla^2 g(x)_i
00795 \f]
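
In the loops below this is assembled from the functions \f$ r_k \f$:
for each \f$ (k, \ell) \f$ a weight vector \f$ w \f$ is formed with
\f$ w_i = \sigma \f$ when \f$ I_{k,\ell}(i) = 0 \f$ and
\f$ w_i = \lambda_{ I_{k,\ell}(i) - 1 } \f$ otherwise, the weighted Hessian
\f$ \nabla_u^2 [ w^\mathrm{T} r_k ( u^{k,\ell} ) ] \f$ is computed by
\c Hessian or \c SparseHessian, and its lower triangle entries are added
into \c values using the map \c index_hes_fg_.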
00796 
00797 \param[in] n
00798 is the dimension of the argument space for g(x); i.e., must be equal to \c n_.
00799 
00800 \param x
00801 if \c values is not \c NULL, \c x
00802 is a vector of size \c n containing the point at which to evaluate
00803 the Hessian of the Lagrangian.
00804 
00805 \param[in] new_x
00806 is false if the previous call to any one of the 
00807 \ref Evaluation_Methods used the same value for \c x.
00808 
00809 \param[in] obj_factor
00810 the value \f$ \sigma \f$ multiplying the Hessian of
00811 f(x) in the expression for \ref The_Hessian_of_the_Lagrangian.
00812 
00813 \param[in] m
00814 is the dimension of the range space for g(x); i.e., must be equal to \c m_.
00815 
00816 \param[in] lambda
00817 if \c values is not \c NULL, \c lambda
00818 is a vector of size \c m specifying the value of \f$ \lambda \f$
00819 in the expression for \ref The_Hessian_of_the_Lagrangian.
00820 
00821 \param[in] new_lambda
00822 is true if the previous call to \c eval_h had the same value for
00823 \c lambda and false otherwise.
00824 (Not currently used.)
00825 
00826 \param[in] nele_hess
00827 is the number of possibly non-zero elements in the Hessian of the Lagrangian;
00828 i.e., must be equal to \c nnz_h_lag_.
00829 
00830 \param iRow
00831 if \c values is not \c NULL, \c iRow is not defined.
00832 if \c values is \c NULL, \c iRow
00833 is a vector with size \c nele_hess.
00834 The input value of its elements does not matter.
00835 On output, 
00836 For <tt>k = 0 , ... , nele_hess-1, iRow[k]</tt> is the 
00837 base zero row index for the 
00838 k-th possibly non-zero entry in the Hessian of the Lagrangian.
00839 
00840 \param jCol
00841 if \c values is not \c NULL, \c jCol is not defined.
00842 if \c values is \c NULL, \c jCol
00843 is a vector with size \c nele_hess.
00844 The input value of its elements does not matter.
00845 On output, 
00846 For <tt>k = 0 , ... , nele_hess-1, jCol[k]</tt> is the 
00847 base zero column index for the 
00848 k-th possibly non-zero entry in the Hessian of the Lagrangian.
00849 
00850 \param values
00851 if \c values is not \c NULL, it
00852 is a vector with size \c nele_hess.
00853 The input value of its elements does not matter.
00854 On output, 
00855 For <tt>k = 0 , ... , nele_hess-1, values[k]</tt> is the 
00856 value for the 
00857 k-th possibly non-zero entry in the Hessian of the Lagrangian.
00858 
00859 \return
00860 The return value is always true; see \ref Evaluation_Methods.
00861 */
00862 bool cppad_ipopt_nlp::eval_h(Index n, const Number* x, bool new_x,
00863                    Number obj_factor, Index m, const Number* lambda,
00864                    bool new_lambda, Index nele_hess, Index* iRow,
00865                    Index* jCol, Number* values)
00866 {    CPPAD_ASSERT_UNKNOWN(size_t(m) == m_ );
00867      CPPAD_ASSERT_UNKNOWN(size_t(n) == n_ );
00868 
00869      size_t i, j, k, ell, l;
00870      std::map<size_t,size_t>::iterator index_ij;
00871 
00872      if (values == NULL) 
00873      {    for(k = 0; k < nnz_h_lag_; k++)
00874           {    iRow[k] = iRow_h_lag_[k];
00875                jCol[k] = jCol_h_lag_[k];
00876           }
00877           return true;
00878      }
00879 
00880      // initialize summation
00881      l = nnz_h_lag_;
00882      while(l--)
00883           values[l] = 0.;
00884 
00885      // update tape_ok_ flag
00886      for(k = 0; k < K_; k++) 
00887      {    if( retape_[k] && (new_x || L_[k] > 1) )
00888                tape_ok_[k] = false;
00889      }
00890 
00891      for(k = 0; k < K_; k++) for(ell = 0; ell < L_[k]; ell++)
00892      {    fg_info_->index(k, ell, I_, J_);
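          // Skip this (k, ell) term when it cannot contribute to the result:
          // no range component maps to the objective while obj_factor > 0,
          // and none maps to a constraint whose multiplier passes the
          // positivity test below.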
00893           bool in_use = false;
00894           for(i = 0; i < p_[k]; i++)
00895           {    if( I_[i] == 0 )
00896                     in_use |= obj_factor > 0.;
00897                else in_use |= lambda[ I_[i] - 1 ] > 0;
00898           }
00899           if( in_use )
00900           {
00901                if( ! tape_ok_[k]  )
00902                {    // Record r_k for value of u corresponding to x
00903                     fun_record(
00904                          fg_info_        ,   // inputs
00905                          k               ,
00906                          p_              ,
00907                          q_              ,
00908                          n_              ,
00909                          x               ,
00910                          J_              ,
00911                          r_fun_              // output
00912                     );
00913                     tape_ok_[k] = ! (retape_[k] || L_[k] > 1);
00914                }
00915                NumberVector w(p_[k]);
00916                NumberVector r_hes(q_[k] * q_[k]);
00917                NumberVector u(q_[k]);
00918                for(j = 0; j < q_[k]; j++)
00919                {    CPPAD_ASSERT_UNKNOWN( J_[j] < n_ );
00920                     u[j]   = x[ J_[j] ];
00921                }
00922                for(i = 0; i < p_[k]; i++)
00923                {    CPPAD_ASSERT_UNKNOWN( I_[i] <= m_ );
00924                     if( I_[i] == 0 )
00925                          w[i] = obj_factor;
00926                     else w[i] = lambda[ I_[i] - 1 ];
00927                }
00928                if( retape_[k] )
00929                     r_hes = r_fun_[k].Hessian(u, w);
00930                else r_hes = r_fun_[k].SparseHessian(
00931                          u, w, pattern_hes_r_[k]
00932                );
00933                for(i = 0; i < q_[k]; i++) for(j = 0; j < q_[k]; j++) 
00934                if( J_[j] <= J_[i] ) 
00935                {    index_ij = index_hes_fg_[J_[i]].find(J_[j]);
00936                     if( index_ij != index_hes_fg_[J_[i]].end() )
00937                     {    l          = index_ij->second;
00938                          values[l] += r_hes[i * q_[k] + j];
00939                     }
00940                     else CPPAD_ASSERT_UNKNOWN(
00941                          r_hes[i * q_[k] + j] == 0.
00942                     );
00943                }
00944           }
00945      }
00946 # ifndef NDEBUG
00947      for(l = 0; l < nnz_h_lag_; l++) CPPAD_ASSERT_KNOWN(
00948           (-infinity_ < values[l]) && (values[l] < infinity_),
00949           "cppad_ipopt_nlp::eval_h:: a component of "
00950           "Hessian of Lagrangian is not finite"
00951      );
00952 # endif
00953      return true;
00954 }
00955 
00956 /*!
00957 Pass solution information from Ipopt to the user's solution structure.
00958 
00959 \param[in] status
00960 is the Ipopt solution status,
00961 which gets mapped to a corresponding value for 
00962 \n
00963 <tt>solution_->status</tt>.
00964 
00965 \param[in] n
00966 is the dimension of the domain space for f(x) and g(x); i.e.,
00967 it must be equal to \c n_.
00968 
00969 \param[in] x
00970 is a vector with size \c n specifying the final solution.
00971 \n
00972 <tt>solution_->x</tt> is set to be a vector with size \c n
00973 and to have the same element values.
00974 
00975 \param[in] z_L
00976 is a vector with size \c n specifying the Lagrange multipliers for the
00977 constraint \f$ x^l \leq x \f$.
00978 \n
00979 <tt>solution_->z_l</tt> is set to be a vector with size \c n
00980 and to have the same element values.
00981 
00982 \param[in] z_U
00983 is a vector with size \c n specifying the Lagrange multipliers for the
00984 constraint \f$ x \leq x^u \f$.
00985 \n
00986 <tt>solution_->z_u</tt> is set to be a vector with size \c n
00987 and to have the same element values.
00988 
00989 \param[in] m
00990 is the dimension of the range space for g(x); i.e.,
00991 it must be equal to \c m_.
00992 
00993 \param[in] g
00994 is a vector with size \c m containing the value of the constraint function
00995 g(x) at the final solution for \c x.
00996 \n
00997 <tt>solution_->g</tt> is set to be a vector with size \c m
00998 and to have the same element values.
00999 
01000 \param[in] lambda
01001 is a vector with size \c m specifying the Lagrange multipliers for the
01002 constraints \f$ g^l \leq g(x) \leq g^u \f$.
01003 \n
01004 <tt>solution_->lambda</tt> is set to be a vector with size \c m
01005 and to have the same element values.
01006 
01007 \param[in] obj_value
01008 is the value of the objective function f(x) at the final solution for \c x.
01009 \n
01010 <tt>solution_->obj_value</tt> is set to have the same value.
01011 
01012 \param[in] ip_data
01013 is unspecified (by Ipopt) and hence not used.
01014 
01015 \param[in] ip_cq
01016 is unspecified (by Ipopt) and hence not used.
01017 
01018 \par solution_[out]
01019 the pointer \c solution_ , which is equal to the pointer \c solution
01020 in the constructor for \c cppad_ipopt_nlp,
01021 is used to set output values (see documentation above).
01022 */
01023 void cppad_ipopt_nlp::finalize_solution(
01024      Ipopt::SolverReturn               status    ,
01025      Index                             n         , 
01026      const Number*                     x         , 
01027      const Number*                     z_L       , 
01028      const Number*                     z_U       ,
01029      Index                             m         , 
01030      const Number*                     g         , 
01031      const Number*                     lambda    ,
01032      Number                            obj_value ,
01033      const Ipopt::IpoptData*           ip_data   ,
01034      Ipopt::IpoptCalculatedQuantities* ip_cq
01035 )
01036 {    size_t i, j;
01037 
01038      CPPAD_ASSERT_UNKNOWN(size_t(n) == n_ );
01039      CPPAD_ASSERT_UNKNOWN(size_t(m) == m_ );
01040 
01041      switch(status)
01042      {    // convert status from Ipopt enum to cppad_ipopt_solution enum
01043           case Ipopt::SUCCESS:
01044           solution_->status = 
01045                cppad_ipopt_solution::success;
01046           break;
01047 
01048           case Ipopt::MAXITER_EXCEEDED:
01049           solution_->status = 
01050                cppad_ipopt_solution::maxiter_exceeded;
01051           break;
01052 
01053           case Ipopt::STOP_AT_TINY_STEP:
01054           solution_->status = 
01055                cppad_ipopt_solution::stop_at_tiny_step;
01056           break;
01057 
01058           case Ipopt::STOP_AT_ACCEPTABLE_POINT:
01059           solution_->status = 
01060                cppad_ipopt_solution::stop_at_acceptable_point;
01061           break;
01062 
01063           case Ipopt::LOCAL_INFEASIBILITY:
01064           solution_->status = 
01065                cppad_ipopt_solution::local_infeasibility;
01066           break;
01067 
01068           case Ipopt::USER_REQUESTED_STOP:
01069           solution_->status = 
01070                cppad_ipopt_solution::user_requested_stop;
01071           break;
01072 
01073           case Ipopt::DIVERGING_ITERATES:
01074           solution_->status = 
01075                cppad_ipopt_solution::diverging_iterates;
01076           break;
01077 
01078           case Ipopt::RESTORATION_FAILURE:
01079           solution_->status = 
01080                cppad_ipopt_solution::restoration_failure;
01081           break;
01082 
01083           case Ipopt::ERROR_IN_STEP_COMPUTATION:
01084           solution_->status = 
01085                cppad_ipopt_solution::error_in_step_computation;
01086           break;
01087 
01088           case Ipopt::INVALID_NUMBER_DETECTED:
01089           solution_->status = 
01090                cppad_ipopt_solution::invalid_number_detected;
01091           break;
01092 
01093           case Ipopt::INTERNAL_ERROR:
01094           solution_->status = 
01095                cppad_ipopt_solution::internal_error;
01096           break;
01097 
01098           default:
01099           solution_->status = 
01100                cppad_ipopt_solution::unknown;
01101      }
01102 
01103      solution_->x.resize(n_);
01104      solution_->z_l.resize(n_);
01105      solution_->z_u.resize(n_);
01106      for(j = 0; j < n_; j++)
01107      {    solution_->x[j]    = x[j];
01108           solution_->z_l[j]  = z_L[j];
01109           solution_->z_u[j]  = z_U[j];
01110      }
01111      solution_->g.resize(m_);
01112      solution_->lambda.resize(m_);
01113      for(i = 0; i < m_; i++)
01114      {    solution_->g[i]      = g[i];
01115           solution_->lambda[i] = lambda[i];
01116      }
01117      solution_->obj_value = obj_value;
01118      return;
01119 }
01120 
01121 // This routine is defined, but not yet used
01122 // (trying to figure out a problem with Ipopt-3.9.1 and dismod4).
01123 bool cppad_ipopt_nlp::intermediate_callback(
01124      Ipopt::AlgorithmMode              mode,
01125      Index                             iter, 
01126      Number                            obj_value,
01127      Number                            inf_pr, 
01128      Number                            inf_du,
01129      Number                            mu, 
01130      Number                            d_norm,
01131      Number                            regularization_size,
01132      Number                            alpha_du, 
01133      Number                            alpha_pr,
01134      Index                             ls_trials,
01135      const Ipopt::IpoptData*           ip_data,
01136      Ipopt::IpoptCalculatedQuantities* ip_cq
01137 )
01138 {
01139      // std::cout << "intermediate_callback called" << std::endl;
01140      return true;
01141 }
01142 
01143 // ---------------------------------------------------------------------------
01144 /*! \} */
01145 } // end namespace cppad_ipopt
01146 // ---------------------------------------------------------------------------