demos/OptimalPlanning.cpp
/*********************************************************************
 * Software License Agreement (BSD License)
 *
 * Copyright (c) 2011, Rice University
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 *  * Neither the name of the Rice University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *********************************************************************/

/* Author: Luis G. Torres */

#include <ompl/base/SpaceInformation.h>
#include <ompl/base/objectives/PathLengthOptimizationObjective.h>
#include <ompl/base/objectives/StateCostIntegralObjective.h>
#include <ompl/base/objectives/MaximizeMinClearanceObjective.h>
#include <ompl/base/spaces/RealVectorStateSpace.h>
#include <ompl/geometric/planners/rrt/RRTstar.h>

#include <cmath>
#include <fstream>

namespace ob = ompl::base;
namespace og = ompl::geometric;

// Our "collision checker". For this demo, our robot's state space
// lies in [0,1]x[0,1], with a circular obstacle of radius 0.25
// centered at (0.5,0.5). Any states lying in this circular region are
// considered "in collision".
class ValidityChecker : public ob::StateValidityChecker
{
public:
    ValidityChecker(const ob::SpaceInformationPtr& si) :
        ob::StateValidityChecker(si) {}

    // Returns whether the given state's position overlaps the
    // circular obstacle
    bool isValid(const ob::State* state) const
    {
        return this->clearance(state) > 0.0;
    }

    // Returns the distance from the given state's position to the
    // boundary of the circular obstacle.
    double clearance(const ob::State* state) const
    {
        // We know we're working with a RealVectorStateSpace in this
        // example, so we downcast state into the specific type.
        const ob::RealVectorStateSpace::StateType* state2D =
            state->as<ob::RealVectorStateSpace::StateType>();

        // Extract the robot's (x,y) position from its state
        double x = state2D->values[0];
        double y = state2D->values[1];

        // Distance formula between two points, offset by the circle's
        // radius
        return std::sqrt((x-0.5)*(x-0.5) + (y-0.5)*(y-0.5)) - 0.25;
    }
};

ob::OptimizationObjectivePtr getPathLengthObjective(const ob::SpaceInformationPtr& si);

ob::OptimizationObjectivePtr getThresholdPathLengthObj(const ob::SpaceInformationPtr& si);

ob::OptimizationObjectivePtr getClearanceObjective(const ob::SpaceInformationPtr& si);

ob::OptimizationObjectivePtr getBalancedObjective1(const ob::SpaceInformationPtr& si);

ob::OptimizationObjectivePtr getBalancedObjective2(const ob::SpaceInformationPtr& si);

ob::OptimizationObjectivePtr getPathLengthObjWithCostToGo(const ob::SpaceInformationPtr& si);

void plan(int argc, char** argv)
{
    // Construct the robot state space in which we're planning. We're
    // planning in [0,1]x[0,1], a subset of R^2.
    ob::StateSpacePtr space(new ob::RealVectorStateSpace(2));

    // Set the bounds of space to be in [0,1].
    space->as<ob::RealVectorStateSpace>()->setBounds(0.0, 1.0);

    // Construct a space information instance for this state space
    ob::SpaceInformationPtr si(new ob::SpaceInformation(space));

    // Set the object used to check which states in the space are valid
    si->setStateValidityChecker(ob::StateValidityCheckerPtr(new ValidityChecker(si)));

    si->setup();

    // Set our robot's starting state to be the bottom-left corner of
    // the environment, or (0,0).
    ob::ScopedState<> start(space);
    start->as<ob::RealVectorStateSpace::StateType>()->values[0] = 0.0;
    start->as<ob::RealVectorStateSpace::StateType>()->values[1] = 0.0;

    // Set our robot's goal state to be the top-right corner of the
    // environment, or (1,1).
    ob::ScopedState<> goal(space);
    goal->as<ob::RealVectorStateSpace::StateType>()->values[0] = 1.0;
    goal->as<ob::RealVectorStateSpace::StateType>()->values[1] = 1.0;

    // Create a problem instance
    ob::ProblemDefinitionPtr pdef(new ob::ProblemDefinition(si));

    // Set the start and goal states
    pdef->setStartAndGoalStates(start, goal);

    // Since we want to find an optimal plan, we need to define what
    // is optimal with an OptimizationObjective structure. Exactly one
    // of the following 6 lines should be left un-commented; each
    // demonstrates a different optimization objective (the path-length
    // objective is active by default).
    pdef->setOptimizationObjective(getPathLengthObjective(si));
    // pdef->setOptimizationObjective(getThresholdPathLengthObj(si));
    // pdef->setOptimizationObjective(getClearanceObjective(si));
    // pdef->setOptimizationObjective(getBalancedObjective1(si));
    // pdef->setOptimizationObjective(getBalancedObjective2(si));
    // pdef->setOptimizationObjective(getPathLengthObjWithCostToGo(si));

    // Construct our optimal planner using the RRTstar algorithm.
    ob::PlannerPtr optimizingPlanner(new og::RRTstar(si));

    // Set the problem instance for our planner to solve
    optimizingPlanner->setProblemDefinition(pdef);
    optimizingPlanner->setup();

    // Attempt to solve the planning problem within one second of
    // planning time
    ob::PlannerStatus solved = optimizingPlanner->solve(1.0);

    if (solved)
    {
        // Output the length of the path found
        std::cout
            << "Found solution of path length "
            << pdef->getSolutionPath()->length() << std::endl;

        // If a filename was specified, output the path as a matrix to
        // that file for visualization
        if (argc > 1)
        {
            std::ofstream outFile(argv[1]);
            boost::static_pointer_cast<og::PathGeometric>(pdef->getSolutionPath())->
                printAsMatrix(outFile);
            outFile.close();
        }
    }
    else
        std::cout << "No solution found." << std::endl;
}

int main(int argc, char** argv)
{
    plan(argc, argv);

    return 0;
}

// Returns an objective which attempts to minimize the length in
// configuration space of computed paths.
ob::OptimizationObjectivePtr getPathLengthObjective(const ob::SpaceInformationPtr& si)
{
    return ob::OptimizationObjectivePtr(new ob::PathLengthOptimizationObjective(si));
}

// Returns a path-length objective that is considered satisfied as soon
// as a path shorter than 1.51 is found.
ob::OptimizationObjectivePtr getThresholdPathLengthObj(const ob::SpaceInformationPtr& si)
{
    ob::OptimizationObjectivePtr obj(new ob::PathLengthOptimizationObjective(si));
    obj->setCostThreshold(ob::Cost(1.51));
    return obj;
}

// An objective which steers the robot away from obstacles by
// integrating a per-state cost along the path. We inherit from
// StateCostIntegralObjective and supply our own state cost function.
class ClearanceObjective : public ob::StateCostIntegralObjective
{
public:
    ClearanceObjective(const ob::SpaceInformationPtr& si) :
        ob::StateCostIntegralObjective(si, true)
    {
    }

    // Our requirement is to maximize path clearance from obstacles,
    // but we want to represent the objective as a path cost
    // minimization. Therefore, we set each state's cost to be the
    // reciprocal of its clearance, so that as state clearance
    // increases, the state cost decreases.
    ob::Cost stateCost(const ob::State* s) const
    {
        return ob::Cost(1 / si_->getStateValidityChecker()->clearance(s));
    }
};

// Returns an objective which attempts to maximize path clearance.
ob::OptimizationObjectivePtr getClearanceObjective(const ob::SpaceInformationPtr& si)
{
    return ob::OptimizationObjectivePtr(new ClearanceObjective(si));
}

// Creates an objective which optimizes both path length and clearance
// by combining the two objectives in a MultiOptimizationObjective with
// explicit weights.
ob::OptimizationObjectivePtr getBalancedObjective1(const ob::SpaceInformationPtr& si)
{
    ob::OptimizationObjectivePtr lengthObj(new ob::PathLengthOptimizationObjective(si));
    ob::OptimizationObjectivePtr clearObj(new ClearanceObjective(si));

    ob::MultiOptimizationObjective* opt = new ob::MultiOptimizationObjective(si);
    opt->addObjective(lengthObj, 10.0);
    opt->addObjective(clearObj, 1.0);

    return ob::OptimizationObjectivePtr(opt);
}

// Creates an objective equivalent to the one returned by
// getBalancedObjective1(), using operator overloads on
// OptimizationObjectivePtr as a shorthand.
ob::OptimizationObjectivePtr getBalancedObjective2(const ob::SpaceInformationPtr& si)
{
    ob::OptimizationObjectivePtr lengthObj(new ob::PathLengthOptimizationObjective(si));
    ob::OptimizationObjectivePtr clearObj(new ClearanceObjective(si));

    return 10.0*lengthObj + clearObj;
}

// Creates a path-length objective and sets a cost-to-go heuristic
// suitable for it, which informed planners can use to focus search.
ob::OptimizationObjectivePtr getPathLengthObjWithCostToGo(const ob::SpaceInformationPtr& si)
{
    ob::OptimizationObjectivePtr obj(new ob::PathLengthOptimizationObjective(si));
    obj->setCostToGoHeuristic(&ob::goalRegionCostToGo);
    return obj;
}
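
Note that ompl/base/objectives/MaximizeMinClearanceObjective.h is included at the top of the demo but not used by any of the factory functions above. As a hedged sketch (not part of the original demo), a factory following the same pattern as the others could expose it, assuming the MaximizeMinClearanceObjective constructor that takes a SpaceInformationPtr:

// Hypothetical addition, not in the original demo: an objective that
// maximizes the minimum clearance along the path, using the
// MaximizeMinClearanceObjective class whose header is included above.
ob::OptimizationObjectivePtr getMaxMinClearanceObjective(const ob::SpaceInformationPtr& si)
{
    return ob::OptimizationObjectivePtr(new ob::MaximizeMinClearanceObjective(si));
}

It would be selected the same way as the other objectives, e.g. pdef->setOptimizationObjective(getMaxMinClearanceObjective(si)) in plan().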