Package PyDSTool :: Package Toolbox :: Module phaseplane
[hide private]
[frames] | [no frames]

Source Code for Module PyDSTool.Toolbox.phaseplane

   1  """ 
   2      Phase plane utilities. 
   3   
   4      Some 2011 functionality has not yet been updated to use ``plotter``, 
   5      the phase plane plotting manager. 
   6   
   7   
   8  IMPORTANT NOTE DURING DEVELOPMENT: 
   9  For now, many operations with nullclines assume that they are NOT multi-valued 
  10  as a function of their variables, and that they are monotonic only as the x 
  11  variable increases. 
  12   
  13  R. Clewley, 2006 - 2011 
  14  """ 
  15   
  16  from __future__ import division 
  17  # itertools, operator used for _filter_consecutive function 
  18  import itertools, operator 
  19   
  20  from PyDSTool import * 
  21  from PyDSTool.errors import PyDSTool_ValueError 
  22  from PyDSTool.MProject import * 
  23  from PyDSTool.utils import findClosestPointIndex 
  24  from PyDSTool.common import args, metric, metric_L2, metric_weighted_L2, \ 
  25       metric_float, remain, fit_quadratic, fit_exponential, fit_diff_of_exp, \ 
  26       smooth_pts, nearest_2n_indices, make_poly_interpolated_curve, simple_bisection 
  27  from PyDSTool.common import _seq_types, _num_types 
  28  import PyDSTool.Redirector as redirc 
  29   
  30  import numpy as np 
  31  try: 
  32      from numpy import unique 
  33  except ImportError: 
  34      # older version of numpy 
  35      from numpy import unique1d as unique 
  36  import matplotlib.pyplot as pp 
  37   
  38  from scipy.interpolate import UnivariateSpline, InterpolatedUnivariateSpline 
  39  from scipy.optimize import fsolve, minpack 
  40  from scipy.optimize import minpack, zeros 
  41  try: 
  42      newton_meth = minpack.newton 
  43  except AttributeError: 
  44      # newer version of scipy 
  45      newton_meth = zeros.newton 
  46  from scipy import linspace, isfinite, sign, alltrue, sometrue, arctan, arctan2 
  47   
  48  from random import uniform 
  49  import copy 
  50  import sys 
  51   
  52  norm = np.linalg.norm 
  53   
  54  # ---------------------------------------------------------------------------- 
  55   
  56  _functions = ['find_nullclines', 'make_distance_to_line_auxfn', 
  57                'make_distance_to_known_line_auxfn', 'crop_2D', 
  58                'find_period', 'make_flow_normal_event', 'filter_NaN', 
  59                'find_fixedpoints', 'find_steadystates', 'find_equilibria', 
  60                'get_perp', 'get_orthonormal', 'get_rotated', 'angle_to_vertical', 
  61                'is_min_bracket', 'find_nearest_sample_points_by_angle', 
  62                'closest_perp_distance_between_splines', 
  63                'closest_perp_distance_between_sample_points', 
  64                'closest_perp_distance_on_spline', 'project_point', 
  65                'line_intersection', 'find_saddle_manifolds', 'make_Jac', 
  66                'plot_PP_vf', 'plot_PP_fps', 'show_PPs', 'get_PP'] 
  67   
  68  _classes = ['distance_to_pointset', 'mesh_patch_2D', 'dx_scaled_2D', 
  69              'phaseplane', 'fixedpoint_nD', 'fixedpoint_2D', 'nullcline', 
  70              'Point2D', 'plotter_2D', 'local_linear_2D'] 
  71   
  72  _features = ['inflection_zone_leaf', 'inflection_zone_node', 
  73               'max_curvature_zone_leaf', 'max_curvature_zone_node'] 
  74   
  75  __all__ = _functions + _classes + _features + ['plotter'] 
  76   
  77  # ---------------------------------------------------------------------------- 
  78   
  79   
class distance_to_pointset(object):
    """First and second maximum and/or minimum distances of a point q
    to a set of points, returning a dictionary keyed by 'min' and
    'max' to dictionaries keyed by integers 1 and 2 (respectively).
    The values of this dictionary are dictionaries of
       'd' -> distance
       'pos' -> index into pts

    To restrict the search to a lower-dimensional subspace, specify a sub-set
    of the variables in q. Defaults to Euclidean distance (normord=2), otherwise
    specify metric-defining norm order.

    To speed up the search, use n > 1 to create n segments of the pointset,
    from which representative points will first be assessed, before the search
    narrows to a particular segment. Be sure to use a large enough n given
    the variation in each segment (which will otherwise cause spurious results
    for certain test points). Default n=30.
    [NB This option currently only returns the first max/min distances]

    The radius option provides an opportunity to use information from
    a previous call. This works for minimum distances only, and forces
    the same segment to be searched, provided that the new point is
    within the radius of the previous point (using 2-norm unless a 2D
    Point is specified). CURRENTLY, THIS DOES NOT WORK WELL. KEEP IT
    SWITCHED OFF USING (default) radius=0.

    If gen is not None, it should contain a generator for use with
    isochron-related events, for more accurate computations. The remaining
    arguments are associated with this usage.
    """

    # dictionary keys of each result record: 'd' -> distance, 'pos' -> index
    _keys = ['d', 'pos']

    def __init__(self, pts, n=30, radius=0, gen=None, iso_ev=None,
                 other_evnames=None, pars_to_vars=None):
        self.all_pts = pts
        self.radius = radius
        num_pts = len(pts)
        assert n < num_pts, "Use a larger pointset or fewer segments"
        assert n > 0, "n must be a positive integer"
        assert type(n) == int, "n must be a positive integer"
        self.gen = gen
        self.iso_ev = iso_ev
        self.other_evnames = other_evnames
        self.pars_to_vars = pars_to_vars
        if n == 1:
            # always search whole pointset
            self.byseg = False
            self.segments = None
            self.rep_pts = None
            self.seg_start_ixs = None
        else:
            # use segments to speed up search
            self.byseg = True
            step, m = divmod(num_pts, n)
            if step < 2:
                # not enough points to use segments
                self.byseg = False
                self.segments = None
                self.rep_pts = None
                self.seg_start_ixs = None
            else:
                half_step = int(floor(step/2.))
                # space the segment representatives in the middle of segments
                self.seg_start_ixs = [step*i for i in range(n)]
                rep_pts = [pts[step*i+half_step] for i in range(n)]
                # treat any remainder segment specially:
                # if the remainder is too small then combine it with the
                # previous segment (m >= 6 chosen fairly arbitrarily)
                self.segments = []
                base_ix = 0
                for i in range(n):
                    self.segments.append(pts[base_ix:base_ix+step])
                    base_ix += step
                if m >= 6:
                    half_step_last = int(floor(m/2.))
                    self.segments.append(pts[base_ix:])
                    # BUG FIX: the representative must lie inside the remainder
                    # segment, which begins at index step*n (original code used
                    # n*half_step, i.e. roughly the middle of the whole set)
                    rep_pts.append(pts[step*n + half_step_last])
                    self.seg_start_ixs.append(step*n)
                else:
                    # replace last segment with all the rest of the points,
                    # making it longer; leave rep_pts[-1] alone
                    self.segments[-1] = pts[base_ix-step:]
                self.rep_pts = pointsToPointset(rep_pts)
        # history = [segment of 1st minimum, segment of 2nd minimum,
        #            array copy of the last query point];
        # max distances are not currently recorded in the history
        self.clear_history()

    def clear_history(self):
        """Forget the segments and point recorded by the previous
        minimum-distance search (used by the radius option)."""
        self.history = [None, None, None]

    def __call__(self, q, use_norm=False, normord=2, minmax=None):
        """Return the distance dictionary for Point q (see class docstring).

        minmax selects which extrema to compute; defaults to ['min', 'max']
        (None sentinel replaces the original mutable default argument --
        behavior is identical).
        """
        if minmax is None:
            minmax = ['min', 'max']
        dic_info = self._distance(q, use_norm, normord, minmax)
        if self.gen is not None:
            # refine minimum distance using the generator's isochron events
            perp_ev, t_ev = _find_min_pt(self.gen, q,
                                         dic_info['min'][1]['pos'], self.all_pts,
                                         self.pars_to_vars, self.iso_ev,
                                         self.other_evnames)
            ev_pt = perp_ev[0][q.coordnames]
            if use_norm:
                dic_info['min'][0] = norm(q - ev_pt, normord)
            else:
                m = q - ev_pt
                try:
                    coords = q.coordnames
                except AttributeError:
                    raise TypeError("Input point must be a Point object")
                m[coords[0]] = abs(m[coords[0]])
                m[coords[1]] = abs(m[coords[1]])
                dic_info['min'][0] = m
            # leave 'pos' information alone -- it indexes the nearest point
            # in self.all_pts
        return dic_info

    def _two_segment_min(self, q, seg_ix1, seg_ix2, coords, fn, le,
                         dmin, dmin_old):
        """Search segments seg_ix1 and seg_ix2 for the first minimum distance
        to q. Returns (d, global_index, winning_seg_ix, other_seg_ix)."""
        r1 = self.search_min(q, self.segments[seg_ix1][coords],
                             fn, le, dmin, dmin_old)['min'][1]
        r2 = self.search_min(q, self.segments[seg_ix2][coords],
                             fn, le, dmin, dmin_old)['min'][1]
        if r1['d'] < r2['d']:
            best, seg_win, seg_other = r1, seg_ix1, seg_ix2
        else:
            best, seg_win, seg_other = r2, seg_ix2, seg_ix1
        return (best['d'], self.seg_start_ixs[seg_win] + best['pos'],
                seg_win, seg_other)

    def _two_segment_max(self, q, seg_ix1, seg_ix2, coords, fn, ge,
                         dmax, dmax_old):
        """Search segments seg_ix1 and seg_ix2 for the first maximum distance
        to q. Returns (d, global_index).

        BUG FIX: the original code compared the two candidate maxima with '<'
        and so returned the *smaller* of them as the first maximum; the larger
        one is the true first maximum.
        """
        r1 = self.search_max(q, self.segments[seg_ix1][coords],
                             fn, ge, dmax, dmax_old)['max'][1]
        r2 = self.search_max(q, self.segments[seg_ix2][coords],
                             fn, ge, dmax, dmax_old)['max'][1]
        if r1['d'] > r2['d']:
            best, seg_win = r1, seg_ix1
        else:
            best, seg_win = r2, seg_ix2
        return best['d'], self.seg_start_ixs[seg_win] + best['pos']

    def _distance(self, q, use_norm, normord, minmax):
        """Core search dispatcher: builds the metric and comparison closures,
        then searches either the whole pointset or (byseg) representative
        points followed by the two best candidate segments."""
        try:
            coords = q.coordnames
        except AttributeError:
            raise TypeError("Input point must be a Point object")
        # HACK: cannot use Inf in dmin for these comparisons --
        # have to use a big number (_num_inf) instead
        if use_norm:
            if 'min' in minmax:
                dmin = (_num_inf, NaN)
                dmin_old = (_num_inf, NaN)
                def le(a, b):
                    return a < b
            if 'max' in minmax:
                dmax = (0, NaN)
                dmax_old = (0, NaN)
                def ge(a, b):
                    return a > b
            def fn(p, q):
                return norm(p-q, normord)
        else:
            if 'min' in minmax:
                dmin = (array([_num_inf for v in coords]), NaN)
                dmin_old = (array([_num_inf for v in coords]), NaN)
                def le(a, b):
                    # any component smaller counts as "less"
                    return sometrue(a-b < 0)
            if 'max' in minmax:
                dmax = (array([0. for v in coords]), NaN)
                dmax_old = (array([0. for v in coords]), NaN)
                def ge(a, b):
                    # any component larger counts as "greater"
                    return sometrue(a-b > 0)
            def fn(p, q):
                # component-wise absolute difference (2D points assumed)
                m = p-q
                m[coords[0]] = abs(m[coords[0]])
                m[coords[1]] = abs(m[coords[1]])
                return m
        if not self.byseg:
            # exhaustive search of the whole pointset
            if 'min' in minmax and 'max' in minmax:
                return self.search_both(q, self.all_pts[coords],
                                fn, le, ge, dmin, dmin_old, dmax, dmax_old)
            elif 'min' in minmax:
                return self.search_min(q, self.all_pts[coords],
                                       fn, le, dmin, dmin_old)
            elif 'max' in minmax:
                return self.search_max(q, self.all_pts[coords],
                                       fn, ge, dmax, dmax_old)
            else:
                raise RuntimeError("Invalid min/max option")
        # --- segmented search ---
        if self.history[0] is None or self.radius == 0:
            use_history = False
        else:
            # use last known segment only if q is within radius of last q
            try:
                test = q - self.history[2]   # a Point
            except Exception:
                # problem with self.history[2] (externally tampered with?)
                print("History is: %r" % (self.history,))
                raise RuntimeError("Invalid history object in "
                                   "distance_to_pointset class instance")
            try:
                use_history = abs(test[0]) < self.radius[0] and \
                              abs(test[1]) < self.radius[1]
            except Exception:
                # radius is a scalar
                use_history = norm(test) < self.radius
        if use_history and minmax == ['min']:
            # re-search only the two segments remembered from last call;
            # history is deliberately left unchanged here
            d_min, gix_min, _, _ = self._two_segment_min(
                q, self.history[0], self.history[1], coords,
                fn, le, dmin, dmin_old)
            return {'min': {1: {'d': d_min, 'pos': gix_min},
                            2: {'d': None, 'pos': None}}}
        if 'min' in minmax and 'max' in minmax:
            # first pass over segment representatives selects the two best
            # candidate segments for each extremum; 'pos' indexes self.segments
            first_pass = self.search_both(q, self.rep_pts[coords],
                              fn, le, ge, dmin, dmin_old, dmax, dmax_old)
            seg_ixs_min = [first_pass['min'][1]['pos'],
                           first_pass['min'][2]['pos']]
            seg_ixs_max = [first_pass['max'][1]['pos'],
                           first_pass['max'][2]['pos']]
            d_min, gix_min, seg_min1, seg_min2 = self._two_segment_min(
                q, seg_ixs_min[0], seg_ixs_min[1], coords,
                fn, le, dmin, dmin_old)
            # (second minimum NOT IMPLEMENTED)
            d_max, gix_max = self._two_segment_max(
                q, seg_ixs_max[0], seg_ixs_max[1], coords,
                fn, ge, dmax, dmax_old)
            # (second maximum NOT IMPLEMENTED)
            # array(q) is a cheap way to take a copy of q without making
            # a new Point object
            self.history = [seg_min1, seg_min2, array(q)]
            return {'min': {1: {'d': d_min, 'pos': gix_min},
                            2: {'d': None, 'pos': None}},
                    'max': {1: {'d': d_max, 'pos': gix_max},
                            2: {'d': None, 'pos': None}}}
        elif 'min' in minmax:
            first_pass = self.search_min(q, self.rep_pts[coords],
                                         fn, le, dmin, dmin_old)
            seg_ixs_min = [first_pass['min'][1]['pos'],
                           first_pass['min'][2]['pos']]
            d_min, gix_min, seg_min1, seg_min2 = self._two_segment_min(
                q, seg_ixs_min[0], seg_ixs_min[1], coords,
                fn, le, dmin, dmin_old)
            # (second minimum NOT IMPLEMENTED)
            self.history = [seg_min1, seg_min2, array(q)]
            return {'min': {1: {'d': d_min, 'pos': gix_min},
                            2: {'d': None, 'pos': None}}}
        elif 'max' in minmax:
            first_pass = self.search_max(q, self.rep_pts[coords],
                                         fn, ge, dmax, dmax_old)
            seg_ixs_max = [first_pass['max'][1]['pos'],
                           first_pass['max'][2]['pos']]
            d_max, gix_max = self._two_segment_max(
                q, seg_ixs_max[0], seg_ixs_max[1], coords,
                fn, ge, dmax, dmax_old)
            # (second maximum NOT IMPLEMENTED)
            # history is only for minimum distances -- do not update it
            return {'max': {1: {'d': d_max, 'pos': gix_max},
                            2: {'d': None, 'pos': None}}}
        else:
            raise RuntimeError("Invalid min/max option")

    def search_both(self, q, pts,
                    fn, le, ge, dmin, dmin_old, dmax, dmax_old):
        """Single pass over pts tracking the two smallest and two largest
        distances to q; fn is the metric, le/ge the comparisons."""
        for i, p in enumerate(pts):
            d = fn(p, q)
            if le(d, dmin[0]):
                dmin_old = dmin
                dmin = (d, i)
            elif le(d, dmin_old[0]):
                dmin_old = (d, i)
            if ge(d, dmax[0]):
                dmax_old = dmax
                dmax = (d, i)
            elif ge(d, dmax_old[0]):
                dmax_old = (d, i)
        return {'min': {1: dict(zip(self._keys, dmin)),
                        2: dict(zip(self._keys, dmin_old))},
                'max': {1: dict(zip(self._keys, dmax)),
                        2: dict(zip(self._keys, dmax_old))}}

    def search_min(self, q, pts, fn, le, dmin, dmin_old):
        """Single pass over pts tracking the two smallest distances to q."""
        for i, p in enumerate(pts):
            d = fn(p, q)
            if le(d, dmin[0]):
                dmin_old = dmin
                dmin = (d, i)
            elif le(d, dmin_old[0]):
                dmin_old = (d, i)
        return {'min': {1: dict(zip(self._keys, dmin)),
                        2: dict(zip(self._keys, dmin_old))}}

    def search_max(self, q, pts, fn, ge, dmax, dmax_old):
        """Single pass over pts tracking the two largest distances to q."""
        for i, p in enumerate(pts):
            d = fn(p, q)
            if ge(d, dmax[0]):
                dmax_old = dmax
                dmax = (d, i)
            elif ge(d, dmax_old[0]):
                dmax_old = (d, i)
        return {'max': {1: dict(zip(self._keys, dmax)),
                        2: dict(zip(self._keys, dmax_old))}}
434 435
def make_Jac(DS, varnames=None):
    """Make a callable Python function for the Jacobian of dynamical system DS.
    If DS is not a Generator or non-hybrid Model object, i.e. a HybridModel, then
    there will be an exception raised unless the model contains only one sub-system.

    If the optional varnames argument is given, the function's arguments will be
    restricted to the given list.
    """
    if varnames is None:
        varnames = DS.query('vars')
    # hold every state variable outside varnames fixed at its initial condition
    ics = DS.query('ics')
    subdomain = dict([(k, ics[k])
                      for k in remain(DS.query('vars'), varnames)])

    if isinstance(DS, Generator.Generator):
        gen = DS
    else:
        # assume Model: drill down two levels to a single underlying generator
        sub = DS.sub_models()
        assert len(sub) == 1, \
            "Pass in sub-model explicitly if more than one in model"
        sub_sub = sub[0].sub_models()
        assert len(sub_sub) == 1, \
            "Pass in sub-model explicitly if more than one in model"
        gen = sub_sub[0]
    jac, new_fnspecs = prepJacobian(gen.funcspec._initargs['varspecs'],
                                    varnames,
                                    gen.funcspec._initargs['fnspecs'])

    # evaluation scope for the symbolic Jacobian: parameters, the fixed
    # variables, and any auxiliary function definitions
    scope = copy.copy(DS.query('pars'))
    scope.update(subdomain)
    scope.update(new_fnspecs)
    return expr2fun(jac, ensure_args=['t'], **scope)
470 471
472 -def find_nullclines(gen, xname, yname, subdomain=None, fps=None, n=10, 473 t=0, eps=None, crop_tol_pc=0.01, jac=None, max_step=0, 474 max_num_points=1000, only_var=None, seed_points=None, 475 strict_domains=True, pycont_cache=None):
476 """Find nullclines of a two-dimensional sub-system of the given 477 Generator object gen, specified by xname and yname. 478 479 Inputs: 480 481 gen is a Generator object 482 xname, yname are state variable names of gen 483 484 Optional inputs: 485 486 Restriction of x and y to sub-domains can be made using the subdomain argument. 487 subdomain may contain pairs (min, max) or singleton values that fix those state 488 variables and restrict the fixed point search to the remaining sub-system on the 489 given ranges. (default to domains given in generator). There must be exactly 490 two ranges remaining (for xname, yname) to give a two-dimensional nullcline 491 problem, unless only_var option has been used (see below). 492 493 n = initial number of meshpoints for fsolve. Do not set this large if using 494 PyCont, e.g. use n=3. Default is 10. 495 496 Set t value for non-autonomous systems (default 0). Support for Jacobians 497 with non-autonomous systems is not yet provided. 498 499 jac is a Jacobian function that accepts keyword arguments including t for 500 time (even if Jacobian is time-independent). 501 502 max_step (dictionary) tells PyCont the largest step size to use for each 503 variable. Integer 0 (default) switches off PyCont use. Use None 504 to tell PyCont to use default max step size (5e-1). 505 506 fps can be a list of points previously calculated to be fixed points, 507 which this function will use as additional starting points for 508 computation. 509 510 eps sets the accuracy to which the nullclines are calculated. Default is 511 approximately 1e-8. 512 513 crop_tol_pc is the percentage (default 1%) for tolerance outside the specified 514 domain for keeping points in nullclines -- final points will be cropped with 515 this tolerance. 516 517 only_var (variable name) requests that only the nullcline for that variable 518 will be computed. The variable name must correspond to xname or yname. 
519 520 seed_points can be used if an approximate position on either nullcline is already 521 known, especially useful if there are no fixed points provided. One or more 522 seed points should be given as a list of dictionaries or Points with keys or 523 coordinates xname and yname. These should be collected as values of the 524 seed_points dictionary, whose keys (xname and/or yname) indicate which points 525 belong to which nullclines. 526 527 strict_domains (boolean, default True) ensures all computed nullcline points are 528 cropped to lie within the given sub-domains. 529 530 pycont_cache (list of two PyCont objects, default None) can be provided to 531 improve efficiency of PyCont use, to avoid re-creation of ODE objects for 532 each call to this function. If a list of [None, None] is provided, this will 533 ensure that a list of the two PyCont objects will be returned in an additional 534 output. !!Don't use this cache if external inputs to the Generator have changed!! 535 536 Output: 537 (x_null, y_null) arrays of (x,y) pairs. Fixed points will be included if 538 they are provided (they are expected to be calculated to high enough 539 accuracy to smoothly fit with the nullcline sample points). 540 541 (optional) [P_x, P_y] list of PyCont objects, returned if 542 pycont_cache input option is used (see above). 543 544 Note that the points returned are not guaranteed to be in any particular 545 order when PyCont is not used. 
546 """ 547 vardict = filteredDict(copy.copy(gen.initialconditions), gen.funcspec.vars) 548 # get state variable domains if subdomain dictionary not given 549 if subdomain is None: 550 subdomain = filteredDict(gen.xdomain, gen.funcspec.vars) 551 else: 552 subdomain = gen._FScompatibleNames(subdomain) 553 assert remain(subdomain.keys(),gen.funcspec.vars) == [] and \ 554 remain(gen.funcspec.vars,subdomain.keys()) == [] 555 if only_var is None: 556 do_vars = [xname, yname] 557 else: 558 assert only_var in [xname, yname], "only_var must be one of xname or yname" 559 do_vars = [only_var] 560 fixed_vars = {} 561 try: 562 x_dom = subdomain[xname] 563 except: 564 x_dom = gen.xdomain[xname] 565 if not (isfinite(x_dom[0]) and isfinite(x_dom[1])): 566 raise PyDSTool_ExistError("Must specify finite range for %s"%xname) 567 try: 568 y_dom = subdomain[yname] 569 except: 570 y_dom = gen.xdomain[yname] 571 if not (isfinite(y_dom[0]) and isfinite(y_dom[1])): 572 raise PyDSTool_ExistError("Must specify finite range for %s"%yname) 573 for varname, dom in subdomain.iteritems(): 574 if isinstance(dom, (tuple,list)): 575 if not (isfinite(dom[0]) and isfinite(dom[1])): 576 raise RuntimeError("Must specify a finite range for %s" % varname) 577 else: 578 fixed_vars[varname] = dom 579 if fixed_vars != {}: 580 vardict.update(filteredDict(gen._FScompatibleNames(dict(fixed_vars)), 581 remain(gen.funcspec.vars, [xname, yname]))) 582 var_ix_map = invertMap(gen.funcspec.vars) 583 max_step = gen._FScompatibleNames(max_step) 584 xname_orig = gen._FScompatibleNamesInv(xname) 585 yname_orig = gen._FScompatibleNamesInv(yname) 586 xname = gen._FScompatibleNames(xname) 587 yname = gen._FScompatibleNames(yname) 588 x_ix = var_ix_map[xname] 589 y_ix = var_ix_map[yname] 590 pp_vars = [xname, yname] 591 pp_vars.sort() 592 pp_var_ix_map = invertMap(pp_vars) 593 594 # subscript refers to the component returned 595 # first letter refers to order of args 596 def xdot_x(x, y, t): 597 vardict[yname] = y 598 
vardict[xname] = x 599 try: 600 return gen.Rhs(t,vardict,gen.pars)[x_ix] 601 except OverflowError: 602 pass
603 def ydot_y(y, x, t): 604 vardict[yname] = y 605 vardict[xname] = x 606 try: 607 return gen.Rhs(t,vardict,gen.pars)[y_ix] 608 except OverflowError: 609 pass 610 if gen.haveJacobian(): 611 # user-supplied Jacobian if present 612 # subscript refers to the component returned 613 def xfprime_x(x, y, t): 614 vardict[xname] = x 615 vardict[yname] = y 616 try: 617 return gen.Jacobian(t, vardict, gen.pars)[x_ix] 618 except OverflowError: 619 pass 620 def yfprime_y(y, x, t): 621 vardict[xname] = x 622 vardict[yname] = y 623 try: 624 return gen.Jacobian(t, vardict, gen.pars)[y_ix] 625 except OverflowError: 626 pass 627 elif jac is not None: 628 # assumes **args-compatible signature! 629 # subscript refers to the component returned 630 xix = pp_var_ix_map[xname] 631 yix = pp_var_ix_map[yname] 632 xfprime_x = lambda x, y, t: array(jac(**{'t': t, xname: float(x), 633 yname: float(y)})[xix]) 634 yfprime_y = lambda y, x, t: array(jac(**{'t': t, xname: float(x), 635 yname: float(y)})[yix]) 636 else: 637 xfprime_x = None 638 yfprime_y = None 639 640 if eps is None: 641 eps = 1.49012e-8 642 643 # ranges for initial search for nullcline points 644 x_range = list(linspace(gen.xdomain[xname][0],gen.xdomain[xname][1],n)) 645 y_range = list(linspace(gen.xdomain[yname][0],gen.xdomain[yname][1],n)) 646 647 if fps is not None: 648 fps_FS = [gen._FScompatibleNames(fp) for fp in fps] 649 add_pts_x = [fp[xname] for fp in fps_FS] 650 add_pts_y = [fp[yname] for fp in fps_FS] 651 else: 652 add_pts_x = [] 653 add_pts_y = [] 654 655 x_null_pts = [] 656 y_null_pts = [] 657 # None dummies in case only_var avoids calculating one of them (for purposes 658 # of returning the right set of arguments at end) 659 x_null = None 660 y_null = None 661 662 seed_pts_x = [] 663 seed_pts_y = [] 664 if x_dom != gen.xdomain[xname] or y_dom != gen.xdomain[yname]: 665 # user sub-domains - make sure there's at least one start point inside 666 seed_pts_x.append( 0.5*(x_dom[0]+x_dom[1]) ) 667 seed_pts_y.append( 
0.5*(y_dom[0]+y_dom[1]) ) 668 669 if seed_points is not None: 670 seed_points = gen._FScompatibleNames(seed_points) 671 if yname in seed_points: 672 seed_pts_x_for_ynull = [pt[xname] for pt in seed_points[yname]] 673 seed_pts_y_for_ynull = [pt[yname] for pt in seed_points[yname]] 674 else: 675 seed_pts_x_for_ynull = [] 676 seed_pts_y_for_ynull = [] 677 if xname in seed_points: 678 seed_pts_x_for_xnull = [pt[xname] for pt in seed_points[xname]] 679 seed_pts_y_for_xnull = [pt[yname] for pt in seed_points[xname]] 680 else: 681 seed_pts_x_for_xnull = [] 682 seed_pts_y_for_xnull = [] 683 seed_pts_x.extend(seed_pts_x_for_xnull+seed_pts_x_for_ynull) 684 seed_pts_y.extend(seed_pts_y_for_xnull+seed_pts_y_for_ynull) 685 else: 686 seed_pts_x_for_ynull = seed_pts_x_for_xnull = [] 687 seed_pts_y_for_ynull = seed_pts_y_for_xnull = [] 688 689 x_range = seed_pts_x + add_pts_x + x_range 690 y_range = seed_pts_y + add_pts_y + y_range 691 692 # try to improve seed points or otherwise use crappy fsolve to find some 693 # points on the nullclines. 
694 rout = redirc.Redirector(redirc.STDOUT) 695 rout.start() 696 if yname in do_vars: 697 for x0 in x_range: 698 try: 699 x_null_pts.extend([(_xinf_ND(xdot_x,x0,args=(y,t), 700 xddot=xfprime_x,xtol=eps/10.),y) for y in y_range]) 701 y_null_pts.extend([(x0,_xinf_ND(ydot_y,y,args=(x0,t), 702 xddot=yfprime_y,xtol=eps/10.)) for y in y_range]) 703 except (OverflowError, ZeroDivisionError): 704 rout.stop() 705 except: 706 rout.stop() 707 raise 708 if xname in do_vars: 709 for y0 in y_range: 710 try: 711 x_null_pts.extend([(_xinf_ND(xdot_x,x,args=(y0,t), 712 xddot=xfprime_x,xtol=eps/10.),y0) for x in x_range]) 713 y_null_pts.extend([(x,_xinf_ND(ydot_y,y0,args=(x,t), 714 xddot=yfprime_y,xtol=eps/10.)) for x in x_range]) 715 except (OverflowError, ZeroDivisionError): 716 rout.stop() 717 except: 718 rout.stop() 719 raise 720 rout.stop() 721 722 # intervals based on user specs (if different to inherent variable domains) 723 xwidth = abs(x_dom[1]-x_dom[0]) 724 ywidth = abs(y_dom[1]-y_dom[0]) 725 xinterval=Interval('xdom', float, [x_dom[0]-crop_tol_pc*xwidth, 726 x_dom[1]+crop_tol_pc*xwidth]) 727 yinterval=Interval('ydom', float, [y_dom[0]-crop_tol_pc*ywidth, 728 y_dom[1]+crop_tol_pc*ywidth]) 729 730 x_null_fsolve = filter_close_points(crop_2D(filter_NaN(x_null_pts), 731 xinterval, yinterval), eps*10) 732 y_null_fsolve = filter_close_points(crop_2D(filter_NaN(y_null_pts), 733 xinterval, yinterval), eps*10) 734 735 # intervals based on inherent variable domains 736 if x_dom == gen.xdomain[xname]: 737 x_null_inh = [] 738 y_null_inh = [] 739 else: 740 x_dom_inh = gen.xdomain[xname] 741 y_dom_inh = gen.xdomain[yname] 742 xinterval_inh = Interval('xdom', float, [x_dom_inh[0], x_dom_inh[1]]) 743 yinterval_inh = Interval('ydom', float, [y_dom_inh[0], y_dom_inh[1]]) 744 x_null_inh = filter_close_points(crop_2D(filter_NaN(x_null_pts), 745 xinterval_inh, yinterval_inh), eps*10) 746 x_null_inh.sort(1) 747 y_null_inh = filter_close_points(crop_2D(filter_NaN(y_null_pts), 748 xinterval_inh, 
yinterval_inh), eps*10) 749 y_null_inh.sort(0) 750 751 752 # default value for now 753 do_cache = False 754 # max_step = 0 means do not use PyCont to improve accuracy 755 if max_step != 0: 756 if pycont_cache is None: 757 pycont_cache = [None, None] # dummy values 758 do_cache = False 759 else: 760 do_cache = True 761 add_fp_pts = array([add_pts_x, add_pts_y]).T 762 if not isinstance(max_step, dict): 763 max_step = {xname: max_step, yname: max_step} 764 # use PyCont to improve accuracy 765 # PyCont will order the points so can draw lines 766 loop_step = 5 767 # current limitation! 768 # pycont won't find disconnected branches of nullclines so we have to seed 769 # with multiple starting points 770 # - also, the following assumes more than one point was already added to 771 # these lists 772 773 # Y 774 if yname in do_vars: 775 # set up a prioritized list of starting points 776 x_init_priority_list = [] 777 y_init_priority_list = [] 778 if fps is not None and len(fps) > 0: 779 x_init = fps[0][xname_orig] + 1e-4*(x_dom[1]-x_dom[0]) 780 y_init = fps[0][yname_orig] + 1e-4*(y_dom[1]-y_dom[0]) 781 x_init_priority_list.append(x_init) 782 y_init_priority_list.append(y_init) 783 if len(y_null_fsolve) > 0: 784 # get an initial point from user-specified sub-domain 785 index = int(len(y_null_fsolve)/2.) 786 x_init, y_init = y_null_fsolve[index] 787 x_init_priority_list.append(x_init) 788 y_init_priority_list.append(y_init) 789 x_init_priority_list.extend( seed_pts_x_for_ynull ) 790 y_init_priority_list.extend( seed_pts_y_for_ynull ) 791 if len(y_null_inh) > 0: 792 old_indices = [] 793 for ratio in (0.25, 0.5, 0.75): 794 index = int(len(y_null_inh)*ratio) 795 if index in old_indices: 796 continue 797 else: 798 old_indices.append(index) 799 x_init, y_init = y_null_inh[index] 800 x_init_priority_list.append(x_init) 801 y_init_priority_list.append(y_init) 802 # lowest priority: domain midpoint 803 x_init_priority_list.append( (x_dom[0]+x_dom[1])/2. 
) 804 y_init_priority_list.append( (y_dom[0]+y_dom[1])/2. ) 805 806 # initialize starting point 807 x_init = x_init_priority_list[0] 808 y_init = y_init_priority_list[0] 809 810 max_tries = min(len(x_init_priority_list), 4) 811 812 if pycont_cache[1] is None: 813 sysargs_y = args(name='nulls_y', pars={xname: x_init}, 814 ics={yname: y_init}, 815 xdomain={yname: gen.xdomain[yname]}, pdomain={xname: gen.xdomain[xname]}) 816 if 'inputs' in gen.funcspec._initargs: # and isinstance(gen.funcspec._initargs['inputs'], dict): 817 if len(gen.funcspec._initargs['inputs']) > 0: 818 sysargs_y['inputs'] = gen.inputs.copy() 819 sysargs_y.pars.update(gen.pars) 820 sysargs_y.pars.update(filteredDict(vardict, [xname, yname], neg=True)) 821 varspecs = {yname: gen.funcspec._initargs['varspecs'][yname]} 822 if 'fnspecs' in gen.funcspec._initargs: 823 old_fnspecs = gen.funcspec._initargs['fnspecs'] 824 fnspecs, new_varspecs = resolveClashingAuxFnPars(old_fnspecs, varspecs, 825 sysargs_y.pars.keys()) 826 # TEMP 827 # process any Jacobian functions to remove un-needed terms 828 if 'Jacobian' in fnspecs: 829 del fnspecs['Jacobian'] 830 ## fnspecs['Jacobian'] = makePartialJac(fnspecs['Jacobian'], [yname]) 831 ## if 'Jacobian_pars' in fnspecs: 832 ## fnspecs['Jacobian_pars'] = makePartialJac(fnspecs['Jacobian_pars'], [yname]) 833 ## elif 'Jacobian' in fnspecs: 834 ## old_fargs, J_par_spec = makePartialJac(old_fnspecs['Jacobian'], [yname], [xname]) 835 ## fnspecs['Jacobian_pars'] = (fnspecs['Jacobian'][0], J_par_spec) 836 sysargs_y.update({'fnspecs': fnspecs}) 837 else: 838 new_varspecs = varspecs 839 sysargs_y.varspecs = new_varspecs 840 sysargs_y.pars['time'] = t 841 sys_y = Vode_ODEsystem(sysargs_y) 842 P_y = ContClass(sys_y) 843 pycont_cache[1] = P_y 844 else: 845 P_y = pycont_cache[1] 846 new_pars = gen.pars.copy() 847 new_pars.update(filteredDict(vardict, [xname, yname], neg=True)) 848 new_pars['time'] = t 849 P_y.model.set(pars=new_pars) 850 PCargs = args(name='null_curve_y', 
type='EP-C', force=True) 851 PCargs.freepars = [xname] 852 PCargs.MinStepSize = 1e-8 853 PCargs.VarTol = PCargs.FuncTol = eps*10 854 PCargs.TestTol = eps 855 if max_step is None: 856 PCargs.MaxStepSize = 5e-1 857 PCargs.StepSize = 1e-6 858 else: 859 PCargs.MaxStepSize = max_step[xname] 860 PCargs.StepSize = max_step[xname]*0.5 861 PCargs.MaxNumPoints = loop_step 862 PCargs.MaxCorrIters = 8 863 #PCargs.verbosity = 100 864 P_y.newCurve(PCargs) 865 866 # Continue, checking every loop_step points until go out of bounds 867 # FORWARD ########### 868 done = False 869 num_points = 0 870 in_subdom = x_init in xinterval and y_init in yinterval 871 tries = 0 872 while not done: 873 try: 874 P_y['null_curve_y'].forward() 875 except PyDSTool_ExistError: 876 tries += 1 877 if tries == max_tries: 878 print 'null_curve_y failed in forward direction' 879 raise 880 else: 881 # try a different starting point 882 x_init = x_init_priority_list[tries] 883 y_init = y_init_priority_list[tries] 884 sys_y.set(pars={xname: x_init}, ics={yname: y_init}) 885 else: 886 num_points += loop_step 887 # stop if have tried going forwards too much and solution becomes 888 # outside of sub-domains, or if num_points exceeds max_num_points 889 if in_subdom: 890 done = (num_points > 5*loop_step or num_points > max_num_points) 891 done = done and not \ 892 check_bounds(array([P_y['null_curve_y'].new_sol_segment[xname], 893 P_y['null_curve_y'].new_sol_segment[yname]]).T, 894 xinterval, yinterval) 895 y_null_part = crop_2D(array([P_y['null_curve_y'].sol[xname], 896 P_y['null_curve_y'].sol[yname]]).T, 897 xinterval, yinterval) 898 else: 899 # did not start in sub-domain and still not yet found 900 # it during continuation 901 y_null_part = crop_2D(array([P_y['null_curve_y'].sol[xname], 902 P_y['null_curve_y'].sol[yname]]).T, 903 xinterval, yinterval) 904 in_subom = len(y_null_part)>0 905 done = num_points > 15*loop_step 906 907 # BACKWARD ########### 908 done = False 909 num_points = 0 910 in_subdom = x_init 
in xinterval and y_init in yinterval 911 tries = 0 912 while not done: 913 try: 914 P_y['null_curve_y'].backward() 915 except PyDSTool_ExistError: 916 tries += 1 917 if tries == max_tries: 918 print 'null_curve_y failed in backward direction' 919 raise 920 else: 921 # try a different starting point 922 x_init = x_init_priority_list[tries] 923 y_init = y_init_priority_list[tries] 924 sys_y.set(pars={xname: x_init}, ics={yname: y_init}) 925 else: 926 num_points += loop_step 927 # stop if have tried going backwards too much and solution becomes 928 # outside of sub-domain, or if num_points exceeds max_num_points 929 if in_subdom: 930 done = (num_points > 5*loop_step or num_points > max_num_points) 931 done = done and not \ 932 check_bounds(array([P_y['null_curve_y'].new_sol_segment[xname], 933 P_y['null_curve_y'].new_sol_segment[yname]]).T, 934 xinterval, yinterval) 935 else: 936 # did not start in sub-domain and still not yet found 937 # it during continuation 938 y_null_part = crop_2D(array([P_y['null_curve_y'].sol[xname], 939 P_y['null_curve_y'].sol[yname]]).T, 940 xinterval, yinterval) 941 in_subom = len(y_null_part)>0 942 done = num_points > 15*loop_step 943 944 # overwrite y_null from fsolve, pre-PyCont 945 # can get repetition of some points 946 if strict_domains: 947 y_null = crop_2D(array([P_y['null_curve_y'].sol[xname], 948 P_y['null_curve_y'].sol[yname]]).T, 949 xinterval, yinterval) 950 else: 951 y_null = array([P_y['null_curve_y'].sol[xname], 952 P_y['null_curve_y'].sol[yname]]).T 953 try: 954 # dummy statement to test result 955 y_null[:,0], y_null[:,1] 956 except IndexError: 957 raise PyDSTool_ExistError('null_curve_y failed: points were out of domain') 958 y_null = uniquePoints(y_null) 959 # Occasional oservation: PyCont can mess up ordering of first few points 960 # Future idea: Try dodging those if the result wasn't monotonic. 
961 962 # Add fixed points to nullcline, assuming sufficient accuracy and monotonicity 963 try: 964 fp_ixs = [findClosestPointIndex(pt, y_null) for pt in add_fp_pts] 965 except ValueError: 966 print "Non-monotonic data computed. Try (1) reducing max_step, (2) crop_tol_pc down to zero if non-monotonicity" 967 print " is at domain endpoints, or (3) normalize tolerances for fixed points with that of nullclines." 968 print " Not including fixed points in nullcline data" 969 else: 970 for n, ix in enumerate(fp_ixs): 971 # +n offsets fact that n entries were already added 972 y_null = np.insert(y_null, ix+n, add_fp_pts[n], axis=0) 973 974 # X 975 if xname in do_vars: 976 # set up a prioritized list of starting points 977 x_init_priority_list = [] 978 y_init_priority_list = [] 979 if fps is not None and len(fps) > 0: 980 x_init = fps[0][xname_orig] + 1e-4*(x_dom[1]-x_dom[0]) 981 y_init = fps[0][yname_orig] + 1e-4*(y_dom[1]-y_dom[0]) 982 x_init_priority_list.append(x_init) 983 y_init_priority_list.append(y_init) 984 if len(x_null_fsolve) > 0: 985 # get an initial point from user-specified sub-domain 986 index = int(len(x_null_fsolve)/2.) 987 x_init, y_init = x_null_fsolve[index] 988 x_init_priority_list.append(x_init) 989 y_init_priority_list.append(y_init) 990 x_init_priority_list.extend( seed_pts_x_for_xnull ) 991 y_init_priority_list.extend( seed_pts_y_for_xnull ) 992 if len(x_null_inh) > 0: 993 old_indices = [] 994 for ratio in (0.25, 0.5, 0.75): 995 index = int(len(x_null_inh)*ratio) 996 if index in old_indices: 997 continue 998 else: 999 old_indices.append(index) 1000 x_init, y_init = x_null_inh[index] 1001 x_init_priority_list.append(x_init) 1002 y_init_priority_list.append(y_init) 1003 # lowest priority: domain midpoint 1004 x_init_priority_list.append( (x_dom[0]+x_dom[1])/2. ) 1005 y_init_priority_list.append( (y_dom[0]+y_dom[1])/2. 
) 1006 1007 # initialize starting point 1008 x_init = x_init_priority_list[0] 1009 y_init = y_init_priority_list[0] 1010 1011 max_tries = min(len(x_init_priority_list), 4) 1012 1013 if pycont_cache[0] is None: 1014 sysargs_x = args(name='nulls_x', pars={yname: y_init}, 1015 ics={xname: x_init}, tdata=[t,t+1], 1016 xdomain={xname: gen.xdomain[xname]}, pdomain={yname: gen.xdomain[yname]}) 1017 if 'inputs' in gen.funcspec._initargs: # and isinstance(gen.funcspec._initargs['inputs'], dict): 1018 if len(gen.funcspec._initargs['inputs']) > 0: 1019 sysargs_x['inputs'] = gen.inputs.copy() 1020 sysargs_x.pars.update(gen.pars) 1021 sysargs_x.pars.update(filteredDict(vardict, [xname, yname], neg=True)) 1022 varspecs = {xname: gen.funcspec._initargs['varspecs'][xname]} 1023 if 'fnspecs' in gen.funcspec._initargs: 1024 old_fnspecs = gen.funcspec._initargs['fnspecs'] 1025 fnspecs, new_varspecs = resolveClashingAuxFnPars(old_fnspecs, varspecs, 1026 sysargs_x.pars.keys()) 1027 # TEMP 1028 # process any Jacobian functions to remove un-needed terms 1029 if 'Jacobian' in fnspecs: 1030 del fnspecs['Jacobian'] 1031 ## fnspecs['Jacobian'] = makePartialJac(fnspecs['Jacobian'], [xname]) 1032 ## if 'Jacobian_pars' in fnspecs: 1033 ## fnspecs['Jacobian_pars'] = makePartialJac(fnspecs['Jacobian_pars'], [xname]) 1034 ## elif 'Jacobian' in fnspecs: 1035 ## old_fargs, J_par_spec = makePartialJac(old_fnspecs['Jacobian'], [xname], [yname]) 1036 ## fnspecs['Jacobian_pars'] = (fnspecs['Jacobian'][0], J_par_spec) 1037 sysargs_x.update({'fnspecs': fnspecs}) 1038 else: 1039 new_varspecs = varspecs 1040 sysargs_x.varspecs = new_varspecs 1041 sysargs_x.pars['time'] = t 1042 sys_x = Vode_ODEsystem(sysargs_x) 1043 P_x = ContClass(sys_x) 1044 pycont_cache[0] = P_x 1045 else: 1046 P_x = pycont_cache[0] 1047 new_pars = gen.pars.copy() 1048 new_pars.update(filteredDict(vardict, [xname, yname], neg=True)) 1049 new_pars['time'] = t 1050 P_x.model.set(pars=new_pars) 1051 PCargs = args(name='null_curve_x', 
type='EP-C', force=True) 1052 PCargs.freepars = [yname] 1053 PCargs.MinStepSize = 1e-8 1054 PCargs.VarTol = PCargs.FuncTol = eps*10 1055 PCargs.TestTol = eps 1056 if max_step is None: 1057 PCargs.MaxStepSize = 5e-1 1058 PCargs.StepSize = 1e-6 1059 else: 1060 PCargs.MaxStepSize = max_step[yname] 1061 PCargs.StepSize = max_step[yname]*0.5 1062 PCargs.MaxNumPoints = loop_step 1063 PCargs.MaxCorrIters = 8 1064 #PCargs.verbosity = 100 1065 P_x.newCurve(PCargs) 1066 done = False 1067 num_points = 0 1068 in_subdom = x_init in xinterval and y_init in yinterval 1069 tries = 0 1070 while not done: 1071 try: 1072 P_x['null_curve_x'].forward() 1073 except PyDSTool_ExistError: 1074 tries += 1 1075 if tries == max_tries: 1076 print 'null_curve_x failed in forward direction' 1077 raise 1078 else: 1079 # try a different starting point 1080 x_init = x_init_priority_list[tries] 1081 y_init = y_init_priority_list[tries] 1082 sys_x.set(pars={yname: y_init}, ics={xname: x_init}) 1083 else: 1084 num_points += loop_step 1085 # stop if have tried going forwards more than 5 times and solution is 1086 # still not within domain, or if num_points exceeds max_num_points 1087 done = (num_points > 5*loop_step and not \ 1088 check_bounds(array([P_x['null_curve_x'].new_sol_segment[xname], 1089 P_x['null_curve_x'].new_sol_segment[yname]]).T, 1090 xinterval, yinterval)) \ 1091 or num_points > max_num_points 1092 done = False 1093 num_points = 0 1094 in_subdom = x_init in xinterval and y_init in yinterval 1095 tries = 0 1096 while not done: 1097 try: 1098 P_x['null_curve_x'].backward() 1099 except PyDSTool_ExistError: 1100 tries += 1 1101 if tries == max_tries: 1102 print 'null_curve_x failed in backward direction' 1103 raise 1104 else: 1105 # try a different starting point 1106 x_init = x_init_priority_list[tries] 1107 y_init = y_init_priority_list[tries] 1108 sys_x.set(pars={yname: y_init}, ics={xname: x_init}) 1109 else: 1110 num_points += loop_step 1111 # stop if have tried going backwards more 
than 5 times and solution is 1112 # still not within domain, or if num_points exceeds max_num_points 1113 done = (num_points > 5*loop_step and not \ 1114 check_bounds(array([P_x['null_curve_x'].new_sol_segment[xname], 1115 P_x['null_curve_x'].new_sol_segment[yname]]).T, 1116 xinterval, yinterval)) \ 1117 or num_points > max_num_points 1118 1119 # overwrite x_null from fsolve, pre-PyCont 1120 # can get repetition of some points 1121 if strict_domains: 1122 x_null = crop_2D(array([P_x['null_curve_x'].sol[xname], 1123 P_x['null_curve_x'].sol[yname]]).T, 1124 xinterval, yinterval) 1125 else: 1126 x_null = array([P_x['null_curve_x'].sol[xname], 1127 P_x['null_curve_x'].sol[yname]]).T 1128 try: 1129 # dummy statement to test result 1130 x_null[:,0], x_null[:,1] 1131 except IndexError: 1132 raise PyDSTool_ExistError('null_curve_x failed: points were out of domain') 1133 x_null = uniquePoints(x_null) 1134 # Add fixed points to nullcline, assuming sufficient accuracy 1135 try: 1136 fp_ixs = [findClosestPointIndex(pt, x_null) for pt in add_fp_pts] 1137 except ValueError: 1138 print "Non-monotonic data computed. Try (1) reducing max_step, (2) crop_tol_pc down to zero if non-monotonicity" 1139 print " is at domain endpoints, or (3) normalize tolerances for fixed points with that of nullclines." 1140 print " Not including fixed points in nullcline data" 1141 else: 1142 for n, ix in enumerate(fp_ixs): 1143 # +n offsets fact that n entries were already added 1144 x_null = np.insert(x_null, ix+n, add_fp_pts[n], axis=0) 1145 1146 else: 1147 x_null = x_null_fsolve 1148 y_null = y_null_fsolve 1149 if do_cache: 1150 return (gen._FScompatibleNamesInv(x_null), gen._FScompatibleNamesInv(y_null), pycont_cache) 1151 else: 1152 return (gen._FScompatibleNamesInv(x_null), gen._FScompatibleNamesInv(y_null)) 1153 1154 1155
def find_fixedpoints(gen, subdomain=None, n=5, maxsearch=1000, eps=1e-8,
                     t=0, jac=None):
    """Find fixed points of a system in a given sub-domain (dictionary),
    on the assumption that they are isolated points. subdomain may contain
    pairs (min, max) or singleton values that fix those state variables
    and restrict the fixed point search to the remaining sub-system on the
    given ranges. (default to domains given in generator).

    n = initial number of meshpoints for fsolve (default 5) in each dimension,
    but total number of seed points tested is bounded by n**D < maxsearch
    (default 1000).

    eps is the 2-norm tolerance below which two solution candidates are
    treated as the same fixed point; fsolve's xtol is set to eps/10.

    jac (optional) is a callable Jacobian accepting keyword arguments for
    the state variables and 't'; it is only used when the generator does
    not provide its own Jacobian.

    Returns a tuple of dictionaries mapping the variable names to the
    values (fixed singleton values from subdomain are included in each).

    Set t value for non-autonomous systems (default 0).
    """
    # get state variable domains if subdomain dictionary not given
    if subdomain is None:
        subdomain = filteredDict(gen.xdomain, gen.funcspec.vars)
    else:
        subdomain = gen._FScompatibleNames(subdomain)
        # subdomain keys must coincide exactly with the generator's variables
        assert remain(subdomain.keys(),gen.funcspec.vars) == [] and \
               remain(gen.funcspec.vars,subdomain.keys()) == []
    # only vary over domains that are given as ranges: fixed values
    # are not counted
    D = 0    # number of free (searched) dimensions
    xdict = {}.fromkeys(gen.funcspec.vars)
    fixed_vars = {}
    for xname, dom in subdomain.iteritems():
        if isinstance(dom, (tuple,list)):
            if not (isfinite(dom[0]) and isfinite(dom[1])):
                raise RuntimeError("Must specify a finite range for %s"%xname)
            D += 1
        else:
            xdict[xname] = dom
            # put fixed vars back into return value
            fixed_vars[xname] = dom
    var_ix_map = invertMap(gen.funcspec.vars)
    # pick n uniformly distributed starting points in domains,
    # ensuring n isn't so large as to require more than maxsearch
    # number of searches
    while True:
        if n**D > maxsearch:
            n = n-1
        else:
            break
    if n < 3:
        raise PyDSTool_ValueError("maxsearch too small")
    # x0_coords[i,:] holds the n seed values along free dimension i
    x0_coords = np.zeros((D,n),'f')
    x0_ixs = []    # NOTE(review): collected but not referenced again below
    x0_names = []
    ix = 0
    xintervals = []
    # sort names by key to ensure same ordering as generator variables
    for xname, xdom in sortedDictItems(subdomain, byvalue=False):
        if isinstance(xdom, (tuple, list)):
            xintervals.append( Interval('xdom', float, subdomain[xname]) )
            x0_ixs.append(var_ix_map[xname])
            x0_names.append(xname)
            x0_coords[ix,:] = linspace(xdom[0], xdom[1], n)
            ix += 1
    # NOTE: def Rhs(self, t, xdict, pdict) and Jacobian signature
    # has same form, so need to use a wrapper function to convert order
    # of arguments to suit solver.
    #
    Rhs_wrap = make_RHS_wrap(gen, xdict, x0_names)
    if gen.haveJacobian():
        fprime = make_Jac_wrap(gen, xdict, x0_names)
    elif jac is not None:
        def Jac_wrap(x, t, pdict):
            # adapt the user-supplied Jacobian to fsolve's calling convention
            xdict.update(dict(zip(x0_names, x)))
            argdict = filteredDict(xdict, jac._args)
            argdict['t'] = t
            try:
                return array(jac(**argdict))
            except (OverflowError, ValueError):
                # penalty
                return array([[1e4]*D]*D)
        fprime = Jac_wrap
    else:
        fprime = None

    # solve xdot on each starting point
    fps = []          # accepted fixed points as arrays (for distinctness test)
    fp_listdict = []  # accepted fixed points as name -> value dictionaries
    d_posns = base_n_counter(n,D)    # odometer over the n**D seed grid
    xtol = eps/10.
    for dummy_ix in xrange(n**D):
        x0 = array([x0_coords[i][d_posns[i]] for i in xrange(D)])
        res = fsolve(Rhs_wrap,x0,(t,gen.pars),xtol=xtol,
                     fprime=fprime,full_output=True)
        xinf_val = res[0]
        if D == 1:
            # keep the 1D case as an array for uniform treatment below
            xinf_val = array([xinf_val])
        # treat f.p.s within epsilon (2-norm) of each other as identical
        if alltrue(isfinite(xinf_val)):
            if len(fps) == 0 or not sometrue([norm(fp-xinf_val)<eps for fp in fps]):
                ok = res[2]==1    # fsolve convergence flag
                # check that bounds were met
                if ok:
                    for ix, xname in enumerate(x0_names):
                        ok = ok and \
                            xintervals[ix].contains(xinf_val[ix]) is not notcontained
                if ok:
                    fps.append(xinf_val)
                    fp_pt = dict(zip(x0_names, xinf_val))
                    fp_pt.update(fixed_vars)
                    fp_listdict.append(gen._FScompatibleNamesInv(fp_pt))
        d_posns.inc()
    return tuple(fp_listdict)


# convenient aliases
find_steadystates = find_equilibria = find_fixedpoints
class Point2D(Point):
    """Convenience sub-class of PyDSTool.Point for 2D Euclidean points.
    If initialized using a dictionary, xname and yname optional arguments
    must specify which coordinate is associated with 'x' and 'y' axes.

    Main advantage is lower overhead of initialization and the more convenient
    notation point.x and point.y for the two coordinates, that may
    nonetheless be given optional actual names (xname and yname). Note that
    Python's "dot-based" attribute lookup is only valid for 'x' and 'y' fields.

    Optional norm order and labels argument still permitted, as for original
    Point. For 2D Pointsets, use original PyDSTool.Pointset class.
    """
    def __init__(self, x, y=None, xname='x', yname='y', norm=2, labels=None):
        """If y is None, expects 2D array or dictionary in x"""
        if y is None:
            try:
                # array x?
                self.x = float(x[0])
                self.y = float(x[1])
            except (TypeError, KeyError):
                # dictionary x?
                self.x = float(x[xname])
                self.y = float(x[yname])
        else:
            self.x = float(x)
            self.y = float(y)
        self.xname = xname
        self.yname = yname
        self._parameterized = False
        # BUG FIX: always initialize labels dict first, so that
        # Point.addlabel can safely update it when labels are supplied
        self.labels = {}
        if labels is not None:
            self.addlabel(labels)
        self._normord = norm
        self.coordtype = float
        self.dimension = 2
        self.coordarray = None  # Not implemented this way

    def mapNames(self, themap):
        # apply a name mapping to axis names and labels
        self.xname = themap(self.xname)
        self.yname = themap(self.yname)
        self.labels = mapNames(themap, self.labels)

    def __contains__(self, coord):
        return coord in (self.xname, self.yname)

    def todict(self, aslist=False):
        """Convert Point2D to a dictionary of array values (or of list with aslist=True)."""
        if aslist:
            return {self.xname: [self.x],
                    self.yname: [self.y]}
        else:
            return {self.xname: self.x,
                    self.yname: self.y}

    def toarray(self):
        return np.array( (self.x, self.y) )

    def toPoint(self):
        """Coerce to regular PyDSTool.Point object"""
        # BUG FIX: previously referenced unqualified names xname/yname,
        # which raised NameError at runtime
        return Point(coorddict={self.xname: self.x, self.yname: self.y},
                     coordtype=float,
                     norm=self._normord,
                     labels=self.labels)

    def get(self, coord, d=None):
        """Dict-like get: return the coordinate value for a known axis name,
        otherwise the default d."""
        if coord == self.xname:
            return self.x
        elif coord == self.yname:
            return self.y
        else:
            return d

    def __len__(self):
        return 2

    def __abs__(self):
        return np.linalg.norm( (self.x, self.y), self._normord)

    def update(self, d):
        # dict-like update from a mapping keyed by the axis names
        self.x = d[self.xname]
        self.y = d[self.yname]

    def items(self):
        return ((self.xname, self.x), (self.yname, self.y))

    def iteritems(self):
        return iter(((self.xname, self.x), (self.yname, self.y)))

    def values(self):
        return [self.x, self.y]

    def itervalues(self):
        return iter([self.x, self.y])

    def keys(self):
        return [self.xname, self.yname]

    def iterkeys(self):
        return iter([self.xname, self.yname])

    def has_key(self, k):
        return k in (self.xname, self.yname)

    def __getitem__(self, ix):
        if ix == 0:
            return self.x
        elif ix == 1:
            return self.y
        elif isinstance(ix, slice):
            return (self.x, self.y)
        else:
            # deliberately raise StopIteration rather than IndexError so
            # that old-style iteration over the point (via __getitem__)
            # terminates cleanly after the two coordinates
            raise StopIteration #IndexError("Index out of range")

    __call__ = __getitem__

    def __setitem__(self, ix, val):
        if ix == 0:
            self.x = val
        elif ix == 1:
            self.y = val
        else:
            raise IndexError("Index out of range")

    def __delitem__(self, k):
        raise NotImplementedError

    def __str__(self):
        return "Point2D( %f, %f )" % (self.x, self.y)

    def __repr__(self):
        if self.labels == {}:
            labstr = 'no labels'
        else:
            labstr = 'labels'
        return "Point2D( %f, %f, %s, %s, %i, %s )" % (self.x, self.y,
                          self.xname, self.yname, self._normord, labstr)

    def info(self, verboselevel=1):
        # print short form at level 1, full repr for higher levels
        if verboselevel == 1:
            print(self.__str__())
        elif verboselevel > 1:
            # BUG FIX: was misspelled 'verboselvel' (NameError)
            print(self.__repr__())

    def __copy__(self):
        return Point2D(self.x, self.y, self.xname, self.yname, self._normord,
                       self.labels)

    def __add__(self, other):
        # supports point-like (indexable) or scalar other
        try:
            return Point2D( self.x + other[0], self.y + other[1],
                        self.xname, self.yname, self._normord, self.labels)
        except TypeError:
            return Point2D( self.x + other, self.y + other,
                        self.xname, self.yname, self._normord, self.labels )

    def __sub__(self, other):
        try:
            return Point2D( self.x - other[0], self.y - other[1],
                        self.xname, self.yname, self._normord, self.labels )
        except TypeError:
            return Point2D( self.x - other, self.y - other,
                        self.xname, self.yname, self._normord, self.labels )

    __radd__ = __add__

    def __rsub__(self, other):
        try:
            return Point2D( other[0] - self.x, other[1] - self.y,
                        self.xname, self.yname, self._normord, self.labels )
        except TypeError:
            return Point2D( other - self.x, other - self.y,
                        self.xname, self.yname, self._normord, self.labels )

    def __mul__(self, c):
        # scalar
        return Point2D( self.x * c, self.y * c,
                        self.xname, self.yname, self._normord, self.labels)

    def __div__(self, c):
        # scalar
        return Point2D( self.x / float(c), self.y / float(c),
                        self.xname, self.yname, self._normord, self.labels )

    def __rmul__(self, c):
        # scalar
        return Point2D( self.x * c, self.y * c,
                        self.xname, self.yname, self._normord, self.labels)

    def __rdiv__(self, c):
        # scalar
        return Point2D( float(c) / self.x, float(c) / self.y,
                        self.xname, self.yname, self._normord, self.labels )

    __truediv__ = __div__
    __rtruediv__ = __rdiv__

    def __neg__(self):
        return Point2D( -self.x, -self.y,
                        self.xname, self.yname, self._normord, self.labels )

    def __pow__(self, other):
        return Point2D( self.x ** other, self.y ** other,
                        self.xname, self.yname, self._normord, self.labels )

    def __eq__(self, other):
        # NOTE: comparison is coordinate-wise; axis names are ignored
        return self.x == other[0] and self.y == other[1]

    def __lt__(self, other):
        # ordering comparisons are by norm magnitude, not lexicographic
        try:
            return np.linalg.norm( (self.x, self.y), self._normord ) < \
                   np.linalg.norm( (other.x, other.y), self._normord )
        except AttributeError:
            # scalar or array
            return np.linalg.norm( (self.x, self.y), self._normord ) < \
                   np.linalg.norm( other, self._normord )

    def __gt__(self, other):
        try:
            return np.linalg.norm( (self.x, self.y), self._normord ) > \
                   np.linalg.norm( (other.x, other.y), self._normord )
        except AttributeError:
            # scalar or array
            return np.linalg.norm( (self.x, self.y), self._normord ) > \
                   np.linalg.norm( other, self._normord )

    def __le__(self, other):
        try:
            return np.linalg.norm( (self.x, self.y), self._normord ) <= \
                   np.linalg.norm( (other.x, other.y), self._normord )
        except AttributeError:
            # scalar or array
            return np.linalg.norm( (self.x, self.y), self._normord ) <= \
                   np.linalg.norm( other, self._normord )

    def __ge__(self, other):
        try:
            return np.linalg.norm( (self.x, self.y), self._normord ) >= \
                   np.linalg.norm( (other.x, other.y), self._normord )
        except AttributeError:
            # scalar or array
            return np.linalg.norm( (self.x, self.y), self._normord ) >= \
                   np.linalg.norm( other, self._normord )
1518 -class nullcline(object):
1519 """Nullcline representation class in 2D (x, y) plane parameterizable 1520 by x variable only. A third-order univariate spline will be fitted to 1521 the given sample point array using all as knots, and available 1522 through the 'spline' attribute. 1523 1524 Input: 1525 Names are given to the x and y axis directions at initialization. 1526 Third argument nullc_array is the N-by-2 array of N two-dimensional 1527 data points. 1528 The final, optional argument x_relative_scale indicates the relative 1529 scale of the x values' domain compared to the y values. This is 1530 used to provide a form of subjective normalization for measuring 1531 curvature and other geometric properties when plotting y vs. x 1532 on these scales. E.g if x values range over -50 to +50, while 1533 y ranges over 0 to 1, x_relative_scale=100 is appropriate. 1534 1535 N.B.: Only works for nullclines that can be written y = f(x). Future 1536 versions may support non-functions. 1537 1538 Some functions acting on nullclines currently also rely on monotonicity 1539 of y=f(x), and will verify this using the is_monotonic method before 1540 proceeding. Future versions will attempt to resolve that. 1541 """
1542 - def __init__(self, xname, yname, nullc_array, x_relative_scale=1):
1543 self.xname = xname 1544 self.yname = yname 1545 self.array = nullc_array 1546 # ensure monotonicity 1547 if not isincreasing(nullc_array[:,0]): 1548 raise AssertionError("x axis '%s' values must be monotonically increasing" % xname) 1549 self.spline = InterpolatedUnivariateSpline(nullc_array[:,0], 1550 nullc_array[:,1]) 1551 # store these precomputed values in advance for efficiency 1552 self.x_relative_scale_fac = x_relative_scale 1553 self.x_relative_scale_fac_2 = self.x_relative_scale_fac**2 1554 self.x_relative_scale_fac_3 = self.x_relative_scale_fac**3
1555
1556 - def __call__(self, x):
1557 """Returns a scalar if x is scalar.""" 1558 if isinstance(x, _num_types): 1559 return self.spline(x)[0] 1560 else: 1561 return self.spline(x)
1562
1563 - def createParameterization(self):
1564 """Creates a (non-arc-length) parameterization""" 1565 raise NotImplementedError
1566
1567 - def toPointset(self, with_param=False):
1568 if with_param: 1569 self.createParameterization() 1570 #return arrayToPointset(x_vals, y_vals, self.xname, self.yname, params, 's') 1571 else: 1572 return Pointset({self.xname: self.array[:,0], 1573 self.yname: self.array[:,1]})
1574
1575 - def sample_x_domain(self, refine=0):
1576 return _sample_array_interior(self.array[:,0], 0.01, 1577 refine=refine)
1578
1579 - def tgt_vec(self, x, rescale=False):
1580 """Return tangent vector to an interior point of nullcline, 1581 normalized to length 1. Set rescale option (default False) 1582 to return a vector rescaled according to relative x vs. y scales 1583 (thus it's no longer a tangent vector in that case).""" 1584 try: 1585 der = self.spline(x,1) # first deriv through option to __call__ 1586 except AssertionError: 1587 raise ValueError("Derivative not available for endpoints or outside of spline domain") 1588 else: 1589 if rescale: 1590 tgt = np.array((1, self.x_relative_scale_fac_2*der)) 1591 else: 1592 tgt = np.array((1, der)) 1593 return tgt/np.linalg.norm(tgt)
1594
1595 - def curvature(self, xdata):
1596 """Signed curvature at x is returned (scalar) for scalar x, 1597 otherwise array of such scalars for 1D array x. 1598 1599 Positive values mean concave "up" in the plane.""" 1600 try: 1601 ders = np.array([self.spline.derivatives(x) for x in xdata]) 1602 except AssertionError: 1603 raise ValueError("Derivative not available for endpoints or " 1604 "outside of spline domain") 1605 except TypeError: 1606 # singleton 1607 try: 1608 ders = self.spline.derivatives(xdata) 1609 except AssertionError: 1610 raise ValueError("Derivative not available for endpoints " 1611 "or outside of spline domain") 1612 return self.x_relative_scale_fac_2*ders[2] / \ 1613 pow(1+self.x_relative_scale_fac_2*ders[1]*ders[1],1.5) 1614 else: 1615 return self.x_relative_scale_fac_2*ders[:,2] / \ 1616 np.power(1+self.x_relative_scale_fac_2*ders[:,1]*ders[:,1],1.5)
1617
1618 - def grad_curvature(self, xdata):
1619 """Derivative of the curvature at x is returned (scalar) for scalar x, 1620 otherwise array of such scalars for 1D array x.""" 1621 try: 1622 ders = np.array([self.spline.derivatives(x) for x in xdata]) 1623 except AssertionError: 1624 raise ValueError("Derivative not available for endpoints or " 1625 "outside of spline domain") 1626 except TypeError: 1627 # singleton 1628 try: 1629 ders = self.spline.derivatives(xdata) 1630 except AssertionError: 1631 raise ValueError("Derivative not available for endpoints " 1632 "or outside of spline domain") 1633 denom = pow(1+self.x_relative_scale_fac_2*ders[1]*ders[1],2.5) 1634 return (ders[3]*self.x_relative_scale_fac_2*(1+self.x_relative_scale_fac_2*ders[1]*ders[1]) - \ 1635 3*(self.x_relative_scale_fac_2*self.x_relative_scale_fac_2*ders[2]*ders[2]*ders[1])) / denom 1636 else: 1637 d1_sq = ders[:,1]*ders[:,1] 1638 denom = np.power(1+self.x_relative_scale_fac_2*d1_sq,2.5) 1639 return (ders[:,3]*self.x_relative_scale_fac_2*(1+self.x_relative_scale_fac_2*d1_sq) - \ 1640 3*(self.x_relative_scale_fac_2*self.x_relative_scale_fac_2*ders[:,2]*ders[:,2]*ders[:,1])) / denom
1641
1642 - def curvature_at_sample_points(self, refine=0):
1643 """Returns array of signed scalars for each 1644 x data point (knot) of the nullcline spline (computed just 1645 inside endpoints by 1% of distance to next innermost sample 1646 point). 1647 1648 Positive values mean concave "up" in the plane. 1649 Assumes nullcline is monotonic.""" 1650 return self.curvature(_sample_array_interior(self.array[:,0], 0.01, 1651 refine=refine))
1652
1653 - def grad_curvature_at_sample_points(self, refine=0):
1654 """Returns array of signed scalars for each 1655 x data point (knot) of the nullcline spline (computed just 1656 inside endpoints by 1% of distance to next innermost sample 1657 point). 1658 1659 Assumes nullcline is monotonic.""" 1660 return self.grad_curvature(_sample_array_interior(self.array[:,0], 1661 0.01, refine=refine))
1662
1663 - def concavity(self, xdata):
1664 """Concavity scalar +/- 1 or 0 at x is returned for scalar x, 1665 otherwise array of such scalars for 1D array x. 1666 1667 Positive values mean concave "up" in the plane.""" 1668 return np.sign(self.curvature(xdata))
1669
1670 - def concavity_at_sample_points(self, refine=0):
1671 """Returns array of +/- 1 or 0 scalars for each 1672 x data point (knot) of the nullcline spline (computed just 1673 inside endpoints by 1% of distance to next innermost sample 1674 point). 1675 1676 Positive values mean concave "up" in the plane. 1677 Assumes nullcline is monotonic.""" 1678 return self.concavity(_sample_array_interior(self.array[:,0], 0.01, 1679 refine=refine))
1680
1681 - def crop(self, xdom, ydom, resample_frac=0.1):
1682 """Returns a new Nullcline object cropped to the (x, y) domain 1683 given by pairs xdom and ydom. 1684 1685 If resample_frac > 0 (default 0.1), points at the domain 1686 ends and additional sample points will be included in the new nullcline 1687 object regardless of whether they were in the original sample point set 1688 for the cropped nullcline. New points will be selected at the rate 1689 indicated, measured as a fraction of the domain width. Points that are 1690 as close as 1/10th of this rate to existing sample points will be 1691 excluded, to avoid ill-conditioning the spline shape in the resampling. 1692 1693 Given the assumption that nullclines are montonic functions of x, 1694 this guarantees a unique, contiguous piece of the nullcline is returned. 1695 """ 1696 xinterval = Interval('xdom', float, xdom) 1697 yinterval = Interval('ydom', float, ydom) 1698 sample_vals = list(crop_2D(self.array, xinterval, yinterval)) 1699 if len(sample_vals) == 0: 1700 # force resampling if not already used or if too sparse 1701 if resample_frac == 0 or resample_frac > 0.2: 1702 resample_frac = 0.1 1703 #raise ValueError("No nullcline sample points in given region") 1704 if resample_frac > 0: 1705 width = xdom[1] - xdom[0] 1706 new_xs = xinterval.sample(dt=width*resample_frac) 1707 tol = width*resample_frac*0.1 1708 for x in new_xs: 1709 # exclude any that are within 5% of domain extent of 1710 # existing sample points and within original point extents 1711 if np.all(abs(x - self.array[:,0])>tol) and \ 1712 x >= self.array[0,0] and x <= self.array[-1,0]: 1713 y = self(x) 1714 if yinterval.contains(y) is not notcontained: 1715 sample_vals.append((x,y)) 1716 sample_vals = np.array(sample_vals) 1717 ixs = argsort(sample_vals[:,0]) # sort by x 1718 sample_vals = sample_vals[ixs] 1719 try: 1720 return nullcline(self.xname, self.yname, np.array(sample_vals), 1721 x_relative_scale=self.x_relative_scale_fac) 1722 except: 1723 print "Error cropping nullcline at sample points", 
sample_vals 1724 print "MAYBE TOO FEW VALUES SAMPLED: number was", len(sample_vals) 1725 raise
1726
    def is_monotonic(self):
        """Return True if the nullcline's sampled y-values form a monotonic
        sequence (a standing assumption of many nullcline operations in this
        module -- see the module docstring)."""
        # column 1 of the (x, y) sample array holds the y-values
        return ismonotonic(self.array[:,1])
1729
1730 - def __copy__(self):
1731 return nullcline(self.xname, self.yname, self.array)
1732 1733
def get_perp(v):
    """Return the perpendicular of the 2D vector v (rotation by -90 degrees).

    Assumes 2D input; the result has the same type as v.
    """
    perp = v.copy()  # preserves the input's container/array type
    perp[0], perp[1] = v[1], -v[0]
    return perp
1740
def get_orthonormal(v):
    """Return a unit-length vector perpendicular to the 2D input v
    (rotation by -90 degrees, then normalization)."""
    perp = v.copy()  # preserves the input's array type
    perp[0], perp[1] = v[1], -v[0]
    return perp / np.linalg.norm(perp)
1747
def get_rotated(x, theta):
    """Rotate the 2D point/vector x anticlockwise through angle theta
    (radians), returning the same container type as the input."""
    rotated = copy.copy(x)  # ensure result has the same type as input
    # rotation implemented as complex multiplication
    rotation = cos(theta) + sin(theta)*1j
    z = (x[0] + x[1]*1j) * rotation
    rotated[0] = z.real
    rotated[1] = z.imag
    return rotated
1754
def filter_close_points(pts, eps, normord=2):
    """Remove points from iterable pts (e.g. array or Pointset)
    according to whether they are closer than epsilon to each other
    in the given norm.

    Fixes: the index container is now an explicit list (the previous
    bare range() result only supports .remove() under Python 2), and the
    norm is computed with the explicit np.linalg.norm rather than the
    ambiguous star-imported `norm`.
    """
    # start with all indices, and remove those that are unwanted
    keep = list(range(len(pts)))
    for i, p in enumerate(pts):
        for j in range(i+1, len(pts)):
            if np.linalg.norm(p - pts[j], normord) < eps:
                try:
                    keep.remove(j)
                except ValueError:
                    # already removed
                    pass
    return pts[keep]
1771 1772
def filter_NaN(pts):
    """Return only the rows of pts whose entries are all finite
    (i.e. drop any point containing NaN or infinity).
    Expects array input.
    """
    finite_rows = [row for row in pts if np.all(np.isfinite(row))]
    return np.array(finite_rows)
1778
def check_bounds(sol_array, xinterval, yinterval):
    """Return True iff every point of sol_array lies inside both intervals.
    The first (x) component is assumed to be the dynamic variable."""
    in_bounds = [xinterval.contains(p[0]) is not notcontained and
                 yinterval.contains(p[1]) is not notcontained
                 for p in sol_array]
    return np.all(in_bounds)
1783
def crop_2D(sol_array, xinterval, yinterval):
    """Filter out points that are outside the domains given by the x and y
    intervals. Expects array input.
    """
    kept = []
    for p in sol_array:
        if xinterval.contains(p[0]) is not notcontained and \
           yinterval.contains(p[1]) is not notcontained:
            kept.append(p)
    return np.array(kept)
1790
def make_vec_at_A_face_B(vec_at_a, Ay, By):
    """Return vec_at_a, negated if necessary, so that it points vertically
    from A towards B. Assumes monotonicity of the functions on which points
    A and B lie, using their y components as arguments."""
    upward_component = np.dot(np.array((0.0, 1.0)), vec_at_a)
    if Ay > By:
        # A sits above B, so the vector must point downward
        flip = upward_component > 0
    else:
        # B sits above (or level with) A, so the vector must point upward
        flip = upward_component < 0
    return (-1 if flip else 1) * vec_at_a
1807 1808
def angle_to_vertical(v):
    """Return an angle in [0, 2*pi) measured clockwise from the upward
    vertical to the 2D vector v."""
    up = np.array((0.0, 1.0))
    cos_theta = np.dot(up, v) / np.linalg.norm(v)
    theta = np.arccos(cos_theta)
    if np.cross(up, v) > 0:
        # v lies to the left of vertical: clockwise angle exceeds pi
        theta = 2*np.pi - theta
    return theta
1818 1819 1820 # not currently used, but helpful if lines are not arranged correctly for intersection test
def is_counter_clockwise(A, B, C):
    """True if the triangle A -> B -> C is traversed counter-clockwise.
    Points must expose 'x' and 'y' fields (e.g. Point2D)."""
    cross_ab = (B.y - A.y) * (C.x - A.x)
    cross_ac = (C.y - A.y) * (B.x - A.x)
    return cross_ac > cross_ab
1823 1824
def determine_if_intersect(A, B, C, D):
    """True if segments AB and CD cross each other (general position
    assumed: no collinear or shared endpoints)."""
    # AB crosses CD iff A and B lie on opposite sides of CD,
    # and C and D lie on opposite sides of AB
    ab_split = is_counter_clockwise(A, C, D) != is_counter_clockwise(B, C, D)
    cd_split = is_counter_clockwise(A, B, C) != is_counter_clockwise(A, B, D)
    return ab_split and cd_split
1828 1829
def line_intersection(A, B, C, D):
    """Assumes line segments AB vs. CD actually cross (must determine this
    separately), where points A - D have fields 'x' and 'y' e.g. a Point2D
    object.

    Uses algorithm from:
    http://www.bryceboe.com/2006/10/23/line-segment-intersection-algorithm/
    and
    http://paulbourke.net/geometry/lineline2d/

    In particular, this code assumes lines are not parallel or collinear.
    """
    numer = (D.x - C.x)*(A.y - C.y) - (D.y - C.y)*(A.x - C.x)
    denom = (D.y - C.y)*(B.x - A.x) - (D.x - C.x)*(B.y - A.y)
    ua = numer / denom
    return Point2D(A.x + ua*(B.x - A.x), A.y + ua*(B.y - A.y))
1845 1846
def project_point(A, C, vec1, vec2=None):
    """Project point A onto the line through C with direction vec1,
    projecting along the direction vec2.

    Assume A and C have fields 'x' and 'y', e.g. is a Point2D object.
    vec1 and vec2 (optional) are vectors with fields x and y.

    If vec2 is not given, it is chosen to be perpendicular to vec1.
    """
    if vec2 is None:
        vec2 = get_perp(vec1)
    start = Point2D(A)
    base = Point2D(C)
    # represent each line by two points on it, then intersect
    return line_intersection(start, start + vec2, base, base + vec1)
1863 1864 1865
class fixedpoint_nD(object):
    """Fixed point of an n-dimensional Generator's vector field, with
    eigen-data computed at construction time.

    IMPORTANT: Any complex eigenvectors are stored as pairs of real eigenvectors,
    with the understanding that the corresponding complex eigenvalues indicate the
    use of these eigenvectors as a solution basis with the trig functions.


    The full, possibly complex eigenvectors are always available using np.linalg(fp.D)
    for a fixedpoint_nD object 'fp'.
    """

    # overridden by subclasses that can classify (see fixedpoint_2D)
    _classifications = None
    # stability codes: 's' = stable, 'c' = center/neutral, 'u' = unstable
    _stability = ('s','c','u')

    def __init__(self, gen, pt, coords=None, jac=None, description='',
                 normord=2, eps=1e-12):
        """pt must have same dimension as generator, but if a sub-system
        is being analyzed then specify the sub-system's variables using
        the coords argument.

        gen : Generator whose vector field has the fixed point
        pt : Point holding the fixed point coordinates (its _normord must
             match the normord argument)
        coords : optional list of variable names selecting a sub-system
        jac : optional Jacobian callable; derived from gen if None
        description : free-form text stored on the object
        normord : norm order for distance computations
        eps : tolerance used to verify that pt really is a fixed point
        """
        if not isinstance(pt, Point):
            raise PyDSTool_TypeError("Fixed point must be a Point type")
        if coords is None:
            self.dimension = gen.dimension
            self.fp_coords = gen.funcspec.vars
        else:
            self.dimension = len(coords)
            self.fp_coords = coords
        self.eps = eps
        self.gen = gen
        # redefine jac if necessary and allocate to self attribute
        self.jac = jac = self._ensure_jac(jac)
        jac_test_arg = filteredDict(pt, self.fp_coords)
        jac_test_arg['t'] = 0
        # evaluate the Jacobian matrix D at the fixed point, trying each
        # of the calling conventions a user-supplied jac may follow
        if hasattr(jac, '_namemap'):
            # jac made with expr2fun
            self.D = asarray(jac.alt_call(jac_test_arg))
        else:
            try:
                # keyword-style call jac(t=..., x=..., y=...)
                self.D = asarray(jac(**jac_test_arg))
            except TypeError:
                try:
                    # positional call jac(t, [x, y, ...])
                    self.D = asarray(jac(jac_test_arg['t'], [jac_test_arg[xvar] for xvar in self.fp_coords]))
                except:
                    # last chance if taken directly from DS.Jacobian method
                    try:
                        self.D = asarray(jac(jac_test_arg['t'], filteredDict(jac_test_arg, self.fp_coords)))
                    except:
                        raise
        assert self.D.shape == (self.dimension, self.dimension)
        assert normord > 0
        self.normord = normord
        assert pt._normord == normord, "Mismatching norm order for point"
        # computes self.evals and self.evecs from self.D
        self._get_eigen()
        evals = self.evals
        evecs = self.evecs
        if self.dimension < gen.dimension:
            subs_str = "sub-"
        else:
            subs_str = "full "
        if self.dimension != len(evals):
            raise ValueError("Dimension of %ssystem must equal number of eigenvalues" % subs_str)
        if self.dimension != len(evecs):
            raise ValueError("Dimension of %ssystem must equal number of eigenvectors" % subs_str)
        if gen.dimension != len(pt):
            raise ValueError("Dimension of full system must equal dimension of fixed point")
        self.point = pt
        # assume autonomous system
        var_ixs = []
        for v in self.fp_coords:
            var_ixs.append(gen.query('vars').index(v))
        # verify that the RHS really vanishes (to tolerance eps) at pt
        fp_evaluated = array(gen.Rhs(0, gen._FScompatibleNames(pt), gen.pars))[var_ixs]
        if sometrue([abs(fp_i) > eps for fp_i in fp_evaluated]):
            print "Tolerance =", eps
            print "vector field is", fp_evaluated
            raise PyDSTool_ValueError("Given point is not a fixed point of the system at given tolerance")
        self.coordnames = pt.coordnames
        self._classify()
        self.description = description

    def _get_eigen(self):
        """Uses numpy linalg.eig to compute eigenvalues and right eigenvectors
        of this fixed point.

        IMPORTANT: Any complex eigenvectors are stored as pairs of real eigenvectors,
        with the understanding that the corresponding complex eigenvalues indicate the
        use of these eigenvectors as a solution basis with the trig functions.

        The full, possibly complex eigenvectors are always available using np.linalg(fp.D)
        for a fixedpoint_nD object 'fp'.
        """
        evals, evecs_array = np.linalg.eig(self.D)
        # transpose so that each row is one eigenvector
        evecs_array = evecs_array.T
        evecs_pt = []
        if all(isreal(evals)):
            for i, v in enumerate(self.fp_coords):
                d = {}
                for j, w in enumerate(self.fp_coords):
                    d[w] = float(real(evecs_array[i][j]))
                evecs_pt.append(Point(d))
        else:
            # go through e'vecs and identify repeated real parts, turn the imaginary
            # parts into an additional e'vec
            for vec in evecs_array:
                if all(imag(vec) == 0):
                    # all real
                    d = {}
                    for j, w in enumerate(self.fp_coords):
                        d[w] = float(real(vec[j]))
                    evecs_pt.append(Point(d))
                else:
                    # two eigenvectors, one each from real and imaginary parts
                    d1 = {}
                    d2 = {}
                    for j, w in enumerate(self.fp_coords):
                        d1[w] = float(real(vec[j]))
                        d2[w] = float(imag(vec[j]))
                    p1 = Point(d1)
                    p2 = Point(d2)
                    found = False
                    for p in evecs_pt:
                        # the complex eigenvectors will come in conjugate pairs,
                        # so only include one set
                        if p == p1 or p == -p1 or p == p2 or p == -p2:
                            found = True
                    if not found:
                        evecs_pt.append(p1)
                        evecs_pt.append(p2)
        self.evals = evals
        # ! don't norm the evecs, because ones originating from imaginary parts will be off
        # relative to the real parts
        self.evecs = tuple(evecs_pt)

    def _ensure_jac(self, jac):
        """Return a usable Jacobian callable: the given jac if not None,
        otherwise one derived from the generator's own Jacobian method."""
        if jac is None:
            try:
                J = self.gen.Jacobian(0, self.gen.initialconditions)
            except PyDSTool_ExistError:
                # no full Jac available
                raise NotImplementedError('Jacobian not available')
            else:
                # !!! in the future, make a jac function that extracts
                # the n components from Jacobian (if n < gen.dimension)
                # Must name it jac_fn because otherwise will eval to None
                # due to clash with this method's argument
                if J.shape[0] == J.shape[1] == self.dimension:
                    arg_str = ', '.join(self.fp_coords)
                    entries = []
                    for n in range(self.dimension):
                        entries.append("self.fp_coords[%s]: %s" % (n, self.fp_coords[n]) )
                    dict_str = "{" + ",".join(entries) + "})\n"
                    # build a wrapper jac_fn(t, <coords>) around gen.Jacobian
                    jac_def_str = "def jac_fn(t, " + arg_str + "):\n\t" + \
                        "return self.gen.Jacobian(t, " + dict_str
                    exec jac_def_str in locals(), globals()
                    return jac_fn
                else:
                    raise NotImplementedError('Jacobian is not the right shape')
        return jac

    def __getitem__(self, k):
        # delegate coordinate access to the underlying Point
        return self.point[k]

    def __setitem__(self, k, v):
        # delegate coordinate assignment to the underlying Point
        self.point[k] = v

    def _classify(self):
        """Determine stability code and degeneracy from the eigenvalues.
        NOTE(review): only the first two eigenvalues are examined here --
        appears written with the 2D case in mind; confirm for n > 2."""
        if self.dimension == 2:
            print "Use fixedpoint_2D class"

        real_evals = (isreal(self.evals[0]), isreal(self.evals[1]))
        equal_evals = abs(self.evals[0] - self.evals[1]) < self.eps
        zero_evals = (abs(self.evals[0]) < self.eps,
                      abs(self.evals[1]) < self.eps)
##        if alltrue(real_evals):
##            sign_evals = (sign(self.evals[0]), sign(self.evals[1]))
##            if sign_evals[0] == sign_evals[1]:
##                self.classification = 'node-like'
##            else:
##                self.classification = 'saddle-like'
##        else:
##            self.classification = 'spiral-like'
        real_parts = real(self.evals)
        # stability from the signs of the real parts of the eigenvalues
        if alltrue(real_parts<0):
            self.stability = 's'
        elif alltrue(real_parts==0):
            self.stability = 'c'
        else:
            self.stability = 'u'
        # degenerate if any eigenvalue is (numerically) zero or repeated
        self.degenerate = sometrue(zero_evals) or equal_evals
2055 2056 2057
class fixedpoint_2D(fixedpoint_nD):
    """Fixed point of a 2D system, classified as 'spiral', 'node' or 'saddle'.

    IMPORTANT: Any complex eigenvectors are stored as pairs of real eigenvectors,
    with the understanding that the corresponding complex eigenvalues indicate the
    use of these eigenvectors as a solution basis with the trig functions.

    The full, possibly complex eigenvectors are always available using np.linalg(fp.D)
    for a fixedpoint_nD object 'fp'.
    """
    # Fix: this attribute was misspelled '_classifcations', which failed to
    # override the base class attribute fixedpoint_nD._classifications (None)
    _classifications = ('spiral', 'node', 'saddle')
    # retain the old misspelled name as an alias for backward compatibility
    _classifcations = _classifications

    def __init__(self, gen, pt, coords=None, jac=None, description='', normord=2, eps=1e-12):
        """Same arguments as fixedpoint_nD.__init__; raises TypeError unless
        the (sub-)system is exactly two-dimensional."""
        fixedpoint_nD.__init__(self, gen, pt, coords, jac, description=description,
                               normord=normord, eps=eps)
        if self.dimension != 2:
            raise TypeError("This class is for 2D systems only")

    def _classify(self):
        """Classify the fixed point from its eigenvalues: 'node'/'saddle'
        when both are real, 'spiral' otherwise; set the stability code
        ('s'table / 'c'enter / 'u'nstable) from the real parts, and flag
        degenerate cases (zero or repeated eigenvalues)."""
        real_evals = (isreal(self.evals[0]), isreal(self.evals[1]))
        equal_evals = abs(self.evals[0] - self.evals[1]) < self.eps
        zero_evals = (abs(self.evals[0]) < self.eps,
                      abs(self.evals[1]) < self.eps)
        if alltrue(real_evals):
            sign_evals = (sign(self.evals[0]), sign(self.evals[1]))
            if sign_evals[0] == sign_evals[1]:
                self.classification = 'node'
            else:
                self.classification = 'saddle'
        else:
            self.classification = 'spiral'
        real_parts = real(self.evals)
        if alltrue(real_parts<0):
            self.stability = 's'
        elif alltrue(real_parts==0):
            self.stability = 'c'
        else:
            self.stability = 'u'
        self.degenerate = sometrue(zero_evals) or equal_evals

    def toarray(self, restrict_to_coords=None):
        """restrict_to_coords option of ordered coordinate names (default None)
        ensures the return array only contains values of those coordinates in
        that order. Useful for interfacing with plotting commands."""
        if restrict_to_coords is None:
            return self.point.coordarray
        else:
            ixs = self.point._map_names_to_ixs(restrict_to_coords)
            return self.point.coordarray[ixs]
2105 2106
class local_linear_2D(object):
    """Create local 2D linear system from a nonlinear system at a specified point.

    Currently, this class is specific to conductance-based Hodgkin-Huxley-like models.
    It assumes all variables can be given in conditionally-linear form

    tau_x(<other_vars>) x' = x_inf(<other_vars>) - x

    for some set of other variables (possibly none). Thus, a DSSRT assistant object is passed
    in to help with these quantities.
    """

    def __init__(self, model, da, voltage, other_var, init_pt, with_exit_auxfn=True, targlang='python', max_t=5,
                 name='nulls', extra_vinf_terms='', extra_pars=None):
        """da is a DSSRT assistant object associated with the model (for Psi dominance values, not Omegas).
        Hold all system vars constant that are not part of the 2D system specified by pair (voltage, other_var).

        extra v_inf terms should include a + or -, and should be given as a single string.
        """
        self.voltage = voltage
        self.other_var = other_var
        # hierarchical names become underscore-separated for FuncSpec use
        varsPP_orig = [other_var.replace('.','_'), voltage]
        x = other_var.split('.')[-1] # remove any hierarchical components: e.g., Na.m -> m
        self.xvar = x
        varsPP = [x, 'v']
        if isinstance(model, NonHybridModel):
            self.gen = model.registry.values()[0]
        elif isinstance(model, Generator.Generator):
            self.gen = model
        else:
            raise TypeError("Pass Generator or NonHybridModel object")
        self.varsPP = varsPP
        self.varsPP_orig = varsPP_orig
        self.da = da
        #self.jac_spec, self.new_fnspecs = prepJacobian(self.gen.funcspec._initargs['varspecs'], varsPP_orig,
        #                                   self.gen.funcspec._initargs['fnspecs'])

        self.gen.set(ics=init_pt)
        self.lin = None

        # make local linear system spec
        vfn_str = '(vinf(%s)-v)/tauv' % x
        xfn_str = '(%sinf(v)-%s)/tau%s' % (x, x, x)
        DSargs = args()
        # 'vdot'/'xdot' are auxiliary copies of the RHS expressions
        DSargs.varspecs = {'v': vfn_str, 'vdot': vfn_str}
        DSargs.varspecs[x] = xfn_str
        DSargs.varspecs[x+'dot'] = xfn_str
        DSargs.xdomain = {'v': [-130, 70]}
        DSargs.xdomain[x] = [0,1]

        # linearized v-infinity about (x0, vinf0), with optional extra terms
        if extra_vinf_terms == '':
            vinf_def = 'psi%s*(%s-%s0) + vinf0' % (x,x,x)
        else:
            vinf_def = 'psi%s*(%s-%s0) + vinf0 + %s' % (x,x,x,extra_vinf_terms)
        auxdict = {'vinf': ([x], vinf_def),
                   x+'inf': (['v'], 'D%sinf*(v-v0)+%sinf0' % (x,x)),
                   'fni'+x: ([x], '(%s-%sinf0)/D%sinf + v0' % (x,x,x))}

        ov = varsPP_orig[0]

        # symbolic derivative of x_inf w.r.t. voltage, from the model's spec
        xinf_defstr = self.gen.funcspec._initargs['varspecs'][self.varsPP_orig[0]+'inf']
        self.Dxinf_quant = Diff(xinf_defstr, voltage)

        # initialize linear system parameters from the values at init_pt
        pt = self.gen._FScompatibleNames(filteredDict(init_pt,
                                                      self.gen._FScompatibleNamesInv(self.gen.funcspec.vars)))
        v0 = init_pt[voltage]
        x0 = init_pt[other_var]
        DSargs.pars = {'v0': v0, x+'0': x0,
                       'tauv': da.calc_tau(voltage, init_pt),
                       'tau'+x: da.calc_tau(other_var, init_pt),
                       'D'+x+'inf': self.Dxinf_quant.eval({voltage: v0}).tonumeric(),
                       x+'inf0': da.calc_inf(other_var, init_pt),
                       'psi'+x: da.calc_psi(other_var, init_pt),
                       'vinf0': da.calc_inf(voltage, init_pt)}
        if extra_pars is not None:
            DSargs.pars.update(extra_pars)

        DSargs.auxvars = [var+'dot' for var in varsPP]
        DSargs.fnspecs = auxdict
        DSargs.algparams = {'init_step':0.001, 'max_step': 0.001, 'max_pts': 10000}
        DSargs.checklevel = 0
        DSargs.ics = {'v': init_pt[voltage], x: init_pt[other_var]}
        DSargs.tdata = [0, max_t]
        DSargs.name = 'lin_'+name

        if with_exit_auxfn:
            # optional terminal-crossing detection along a runtime-set line
            res = make_distance_to_line_auxfn('exit_line', 'exit_fn',
                                              [var+'1' for var in varsPP], True)
            man_pars = res['pars']
            man_auxfns = res['auxfn']
            # line parameters default to 0 until set by the caller
            for p in man_pars:
                DSargs.pars[p] = 0

            DSargs.fnspecs.update(man_auxfns)
            thresh_ev = Events.makeZeroCrossEvent(expr='exit_fn(%s,%s)' %(varsPP[0], varsPP[1]),
                                                  dircode=0,
                                                  argDict={'name': 'exit_ev',
                                                           'eventtol': 1e-8,
                                                           'eventdelay': 1e-3,
                                                           'starttime': 0,
                                                           'precise': True,
                                                           'active': True,
                                                           'term': False},
                                                  varnames=varsPP,
                                                  fnspecs=man_auxfns,
                                                  parnames=man_pars,
                                                  targetlang=targlang
                                                  )
            DSargs.events = [thresh_ev]
        if targlang == 'c':
            print "Warning! Did you delete any existing linear systems?"
            self.lin = Generator.Dopri_ODEsystem(DSargs)
        else:
            self.lin = Generator.Vode_ODEsystem(DSargs)

        # build jac info for linear system
        self.jac_spec, self.new_fnspecs = prepJacobian(self.lin.funcspec._initargs['varspecs'], varsPP,
                                                       self.lin.funcspec._initargs['fnspecs'])

    def localize(self, pt):
        """pt is a Point in *all* of the original model var names
        """
        # recompute all linearization parameters about the new point
        pt = filteredDict(pt, self.gen._FScompatibleNamesInv(self.gen.funcspec.vars))
        v0 = pt[self.voltage]
        x0 = pt[self.other_var]
        x = self.xvar
        da = self.da
        self.lin.set(pars = {'v0': v0, x+'0': x0,
                             'tauv': da.calc_tau(self.voltage, pt),
                             'tau'+x: da.calc_tau(self.other_var, pt),
                             'D'+x+'inf': self.Dxinf_quant.eval({self.voltage: v0}).tonumeric(),
                             x+'inf0': da.calc_inf(self.other_var, pt),
                             'psi'+x: da.calc_psi(self.other_var, pt),
                             'vinf0': da.calc_inf(self.voltage, pt)})
        self.lin.set(ics={'v': v0, x: x0})

    def analyze(self, pt):
        """Perform local analysis at given point in *all* of the original model var names
        Re-localization can be avoided (for testing stationarity assumptions) using pt=None.
        """
        x = self.xvar
        if pt is not None:
            self.localize(pt)
        varsPP = self.varsPP
        self.scope = self.lin.pars.copy()
        self.scope.update(self.new_fnspecs)
        self.jac_fn = expr2fun(self.jac_spec, ensure_args=['t'], **self.scope)
        # expect only one fixed point for the linear system!
        # progressively widen the search domain if none is found
        vlim = 100
        xlim = 1
        i = 1
        fp_coords = None
        while i < 10:
            try:
                fp_coords = find_fixedpoints(self.lin, subdomain={'v': [-vlim, vlim],
                                                                  x: [-xlim, xlim]})[0]
            except IndexError:
                vlim += 200
                xlim += 2
                i += 1
            else:
                break
        if fp_coords is None:
            raise ValueError("No linear system fixed points found!")
        else:
            fp = fixedpoint_2D(self.lin, Point(fp_coords), coords=varsPP, jac=self.jac_fn)

        self.fp = fp

        # order the eigen-data into fast (largest |eval|) and slow parts
        eval_fast_ix = argmax(abs(fp.evals))
        eval_slow_ix = argmin(abs(fp.evals))
        self.eval_fast = fp.evals[eval_fast_ix]
        self.evec_fast = fp.evecs[eval_fast_ix]
        self.eval_slow = fp.evals[eval_slow_ix]
        self.evec_slow = fp.evecs[eval_slow_ix]

        # angles associated with eigenvectors
        alpha = abs(arctan2( self.evec_slow[x], self.evec_slow['v'] ))
        alphap = abs(arctan2( self.evec_fast[x], self.evec_fast['v'] ))
        self.s = sin(alphap) / sin(alpha+alphap)
        self.s2 = sin(alphap + arctan(self.lin.pars['D%sinf' % self.xvar])) / sin(alpha+alphap)
        self.alpha = alpha
        self.alphap = alphap

        # angles associated with nullclines
        self.theta = arctan(self.lin.pars['D%sinf' % self.xvar])
        self.gamma = arctan2(1, self.lin.pars['psi%s' % self.xvar])
        self.phi = self.gamma - self.theta
2299 2300 2301
def make_distance_to_known_line_auxfn(fname, p, dp=None, q=None):
    """Builds definition of an auxiliary function of (x,y) measuring
    (signed) distance from a 2D line given by the point p and either
    (a) the vector dp, or (b) the point q. (All inputs must be Points.)
    """
    if dp is None:
        assert q is not None, "Only specify dp or q, not both"
        assert len(p)==len(q)==2
        other = q
    else:
        assert q is None, "Only specify dp or q, not both"
        assert len(p)==len(dp)==2
        other = dp
    try:
        assert p.coordnames == other.coordnames
    except AttributeError:
        raise TypeError("Only pass Point objects as inputs")
    if dp is None:
        delta_x = q[0] - p[0]
        delta_y = q[1] - p[1]
    else:
        # based on idea that q - p = dp, to prevent loss of precision
        delta_x = dp[0]
        delta_y = dp[1]
    denom = sqrt(delta_x*delta_x + delta_y*delta_y)
    term = delta_x*p[1] - delta_y*p[0]
    # normalization is omitted from the expression when it equals one
    if denom == 1.0:
        d = '%s-%s*y+%s*x'%(repr(term), repr(delta_x), repr(delta_y))
    else:
        d = '(%s-%s*y+%s*x)/%s'%(repr(term), repr(delta_x),
                                 repr(delta_y), repr(denom))
    return {fname: (['x','y'], d)}
2335 2336
def make_distance_to_line_auxfn(linename, fname, p, by_vector_dp=True):
    """Builds definition of an auxiliary function of (x,y) measuring
    (signed) distance from a 2D line specified at run-time through
    parameters named after the coordinate strings in p, using either a
    direction vector (by_vector_dp=True) or a second point. Returns a
    dictionary with the parameter name list ('pars') and the auxiliary
    function definition ('auxfn').
    """
    assert len(p)==2 and isinstance(p[0], str) and isinstance(p[1], str)
    base_x = '%s_p_%s' % (linename, p[0])
    base_y = '%s_p_%s' % (linename, p[1])
    pars = [base_x, base_y]
    if by_vector_dp:
        # q - p supplied directly as dp, to prevent loss of precision
        dp_x = '%s_dp_%s' % (linename, p[0])
        dp_y = '%s_dp_%s' % (linename, p[1])
        pars += [dp_x, dp_y]
        diff_x, diff_y = dp_x, dp_y
    else:
        q_x = '%s_q_%s' % (linename, p[0])
        q_y = '%s_q_%s' % (linename, p[1])
        pars += [q_x, q_y]
        diff_x = '(%s-%s)' % (q_x, base_x)
        diff_y = '(%s-%s)' % (q_y, base_y)
    denom = 'sqrt(%s*%s+%s*%s)' % (diff_x, diff_x, diff_y, diff_y)
    d = '(%s*(%s-y)-(%s-x)*%s)/%s' % (diff_x, base_y, base_x, diff_y, denom)
    return {'pars': pars, 'auxfn': {fname: (['x','y'], d)}}
2367 2368
def bisection(func, a, b, args=(), xtol=1e-10, maxiter=400, normord=2):
    """Bisection root-finding method. Given a function returning +/- 1
    and an interval with func(a) * func(b) < 0, find the root between a and b.

    Variant of scipy.optimize.minpack.bisection with exception for too many
    iterations.

    Fix: the half-interval size is now measured with the explicit
    np.linalg.norm (the previous bare `norm` relied on a star import),
    making the convergence test unambiguous for vector-valued intervals.

    Raises RuntimeError after maxiter iterations without convergence.
    """
    eva = func(a, *args)
    evb = func(b, *args)
    assert (eva*evb < 0), "Must start with interval with func(a) * func(b) <0"
    for _ in range(maxiter):
        half = (b - a) / 2.0
        p = a + half
        # flatten in case half is a scalar rather than an array
        if np.linalg.norm(np.asarray(half).flatten(), normord) < xtol:
            return p
        try:
            ev = func(p, *args)
        except RuntimeError:
            # evaluation failure: accept current midpoint as best estimate
            return p
        if ev == 0:
            return p
        if ev*eva > 0:
            a = p
            eva = ev
        else:
            b = p
    raise RuntimeError("Method failed after %d iterations." % maxiter)
2398 2399 2400 2401 2402 # ------ 2403 2404
class plotter_2D(object):
    """Plotting manager for phase plane analysis. A global instance of
    this class called 'plotter' is already created and exported by this
    module. Operate on that instance directly.

    All plotting will take place in figure numbered self.curr_fig, which
    will be resized to the given shape and limits.

    All added methods of form plot_X take an optional named figure which would
    correspond to whatever figure number is allocated in the fig_directory
    dictionary attribute. New plot methods of this kind must first check
    that do_display is True, and then call the 'setup' method before, and
    'teardown' after doing the necessary plotting.

    """
    def __init__(self, x_dom=None, y_dom=None, figsize=(6,5),
                 curr_fig=1, do_display=False):
        # x_dom, y_dom: optional (min, max) axis limits applied on teardown
        self.x_dom = x_dom
        self.y_dom = y_dom
        # all plot_X methods are no-ops unless do_display is True
        self.do_display = do_display
        self.figsize = figsize
        # Bug fix: the curr_fig argument was previously ignored
        # (hard-coded to 1)
        self.curr_fig = curr_fig
        # maps figure names -> figure numbers, used by set_curr_fig
        self.fig_directory = {}
        # layers will permit types of plot data to be removed or restored
        # to a given plot axes without recomputation
        self.layers = {}

    def set_curr_fig(self, name):
        """Select the current figure by its name in fig_directory.
        Raises ValueError if the name has no entry."""
        try:
            self.curr_fig = self.fig_directory[name]
        except KeyError:
            raise ValueError("Create entry in fig_directory attribute for this figure name")

    def setup(self, figname):
        """Switch to the (optionally named) current figure before plotting."""
        if figname is not None:
            self.set_curr_fig(figname)
        pp.figure(self.curr_fig, figsize=self.figsize)

    def teardown(self):
        """Re-apply any axis limits and redraw, after plotting."""
        pp.figure(self.curr_fig)
        if self.x_dom is not None:
            pp.xlim( self.x_dom )
        if self.y_dom is not None:
            pp.ylim( self.y_dom )
        pp.draw()

    def plot_nullcline(self, nullc, style, lw=1, N=100, marker='', figname=None):
        """Plot a nullcline: the spline curve resampled at N points merged
        with the original sample x-values; original sample points are also
        marked if a marker character is given."""
        if not self.do_display:
            return
        self.setup(figname)
        x_data = nullc.array[:,0]
        y_data = nullc.array[:,1]
        xs = np.sort( np.concatenate( (np.linspace(x_data[0], x_data[-1], N), x_data) ) )
        ys = nullc.spline(xs)
        pp.plot(xs, ys, style, linewidth=lw)
        if marker != '':
            pp.plot(x_data, y_data, style[0]+marker)
        self.teardown()

    def plot_vf(self, gen, xname, yname, col='k', N=20, subdomain=None, scale_exp=0,
                figname=None):
        """Plot vector field in 2D"""
        if not self.do_display:
            return
        self.setup(figname)
        plot_PP_vf(gen, xname, yname, N=N, subdomain=subdomain, scale_exp=scale_exp)
        self.teardown()

    def plot_fps(self, fps, coords=None, do_evecs=False, markersize=10, figname=None):
        """Plot fixed point(s) in 2D"""
        if not self.do_display:
            return
        # Bug fix: setup() was previously not called here, unlike every
        # other plot_X method (and contrary to the class docstring)
        self.setup(figname)
        plot_PP_fps(fps, coords=coords, do_evecs=do_evecs, markersize=markersize)
        self.teardown()

    def plot_line_from_points(self, A, B, style, lw=1, figname=None):
        """Plot the straight line segment from point A to point B."""
        if not self.do_display:
            return
        self.setup(figname)
        pp.plot([A[0], B[0]], [A[1], B[1]], style, linewidth=lw)
        self.teardown()

    def plot_point(self, A, style, figname=None):
        """Plot a single point A with the given style string."""
        if not self.do_display:
            return
        self.setup(figname)
        pp.plot(A[0], A[1], style)
        self.teardown()

    def plot_vline(self, x, style, figname=None):
        """Plot a vertical line at x; style is (color, linestyle)."""
        if not self.do_display:
            return
        self.setup(figname)
        pp.axvline(x, color=style[0], linestyle=style[1])
        self.teardown()

    def plot_hline(self, y, style, figname=None):
        """Plot a horizontal line at y; style is (color, linestyle)."""
        if not self.do_display:
            return
        self.setup(figname)
        pp.axhline(y=y, color=style[0], linestyle=style[1])
        self.teardown()
# Global plotter_2D instance shared by the helper functions in this module.
# (The former module-level `global plotter` statement was a no-op and has
# been removed: `global` only has an effect inside a function body.)
plotter = plotter_2D()
def _newton_step(Q0, nullc, A, B, phi, tol):
    """Internal function for line intersection involving splines.
    Q0 is the initial point on the given nullcline; returns a closer
    approximation Q on the nullcline where it intersects line AB,
    iterating until the angular error falls below tol."""
    global plotter
    # Still need to choose the 0.2 better to ensure correct scaling with A and B
    Q = Q0
    err = 1000*tol
    while err > tol:
        # a distant point on the spline's tangent line at the current Q
        Q_distant = Q + 0.2*nullc.tgt_vec(Q.x)
        crossing = line_intersection(Q, Q_distant, A, B)
        # project the intersection back onto the spline
        Q = Point2D(crossing.x, nullc.spline(crossing.x))
        err = abs(phi - angle_to_vertical(Q - A))
    return Q
2532 2533
def find_nearest_sample_points_by_angle(thetas, phi, return_NaN=False):
    """Return two successive indices into the array thetas whose angle
    values straddle the angle phi.

    Useful when phi is the angle from the vertical of the normal to one
    curve's tangent line and thetas are angles from the vertical of another
    curve: the bracketing indices help locate closest or furthest
    perpendicular distances between the curves.

    Assumes thetas are in increasing order. If phi is not properly
    contained between min(thetas) and max(thetas), raises ValueError
    (or returns NaN if return_NaN is True). In the non-generic corner
    case of an exact match, one of the returned indices points at a
    theta equal to phi.
    """
    global plotter
    above = np.asarray((thetas - phi) > 0, int)
    if np.all(above) or not np.any(above):
        # phi is not straddled by the sampled angles
        if return_NaN:
            return np.nan
        else:
            raise ValueError("Outside domain")
    if above[0]:
        # pattern is Trues then Falses: locate the first False (never 0)
        flip_ix = np.argmin(above)
    else:
        # pattern is Falses then Trues: locate the first True
        flip_ix = np.argmax(above)
    return (flip_ix - 1, flip_ix)
2567 2568
def closest_perp_distance_between_sample_points(NullcA, NullcB, xa, x0B, x1B,
                                                newt_tol=1e-6):
    """Closest perpendicular distance from (xa, ya) on Nullcline A to Nullcline B,
    given that it's known to be between sample points x0B and x1B on Nullcline B.

    Uses a Newton-like step, solving up to an angular tolerance given by newt_tol.

    Returns np.Inf if the Newton iteration leaves the valid domain.
    """
    global plotter
    ya = NullcA(xa)
    a = np.array([xa,ya])
    tgt_vec_at_a = NullcA.tgt_vec(xa, rescale=True)
    # normal has length 1
    normal_at_a = make_vec_at_A_face_B(get_orthonormal(tgt_vec_at_a),
                                       ya, NullcB(xa))
    #plotter.plot_line_from_points(a, a+tgt_vec_at_a, 'k:')
    # clockwise angle of the normal from vertical, used as the target angle
    phi = angle_to_vertical(normal_at_a)

    # A-B spans the normal line at a; C-D is the bracketing chord on B
    A = Point2D(a)
    B = Point2D(a + 0.2*normal_at_a)
    C = Point2D(x0B, NullcB(x0B))
    D = Point2D(x1B, NullcB(x1B))

    # initial estimate: intersect the normal with chord CD
    P0 = line_intersection(A, B, C, D)
    Q0 = Point2D(P0.x, NullcB(P0.x)) # project onto spline
    #theta0 = angle_to_vertical(Q0-a)

    #plotter.plot_line_from_points(A, B, 'k-')
    #plotter.plot_line_from_points(C, D, 'r--')
    #plotter.plot_point(P0, 'gs')
    #plotter.plot_point(Q0, 'ys')

    try:
        Q = _newton_step(Q0, NullcB, A, B, phi, newt_tol)
    except ValueError:
        # iteration left the domain: no finite distance available
        return np.Inf
    else:
        vec = Q-A
        #vec[0] = vec[0] / 100. # hack to investigate differing x and y scales
        return np.linalg.norm(vec)
2608 2609
def closest_perp_distance_on_spline(NullcA, NullcB, xa):
    """Closest perpendicular distance to Nullcline B at point given by xa on
    Nullcline A. Returns np.Inf if the perpendicular at xa does not meet
    the sampled domain of Nullcline B."""
    global plotter
    A = Point2D(xa, NullcA(xa))
    # vectors from A to every sample point of nullcline B
    a_to_NullcB = NullcB.array - A
    # their clockwise angles from vertical, for bracketing the normal
    thetas_B = np.apply_along_axis(angle_to_vertical, -1, a_to_NullcB)
    # monotonicity assumptions ensures that y coord of other nullcline
    # indicates which direction normal vector must face
    # ... also assumes that nullcline B is defined for all x values that
    # nullcline A is
    norm_vec_at_a = make_vec_at_A_face_B(get_orthonormal( \
                                            NullcA.tgt_vec(xa, rescale=True)),
                                         A.y, NullcB(xa))
    phi_a = angle_to_vertical(norm_vec_at_a)
    #plotter.plot_line_from_points(A, A + norm_vec_at_a, 'k-.')

    # determine closest sample points
    try:
        ix_B_lo, ix_B_hi = find_nearest_sample_points_by_angle(thetas_B, phi_a)
    except ValueError:
        # Outside domain of other nullcline
        return np.Inf
    else:
        # widen a degenerate bracket (equal indices), clipped to the array
        if ix_B_lo == ix_B_hi:
            ix_B_lo = np.max( [0, ix_B_lo-1] )
            ix_B_hi = np.min( [len(NullcB.array)-1, ix_B_hi+1] )
        x0B = NullcB.array[ix_B_lo,0]
        x1B = NullcB.array[ix_B_hi,0]

        # refine to closest point on spline of Nullcline B
        return closest_perp_distance_between_sample_points(NullcA, NullcB, xa, x0B, x1B)
2641 2642
def is_min_bracket(triple):
    """Classify a triple (a, b, c) of positive numbers as a minimum bracket.

    Returns (boolean, index, error) where:

    boolean -- True iff a > b < c, i.e. the middle value brackets a minimum;

    index -- 1 (middle) when the triple brackets a minimum, otherwise the
      end of the triple holding the smallest value (0 or 2);

    error -- the smallest absolute difference between adjacent values in
      the triple.
    """
    a, b, c = triple
    if a > b < c:
        # genuine bracket: middle value is strictly the smallest
        return True, 1, min(a - b, c - b)
    if a < c:
        # monotonic increasing a < b < c: minimum at the left end
        return False, 0, min(b - a, c - b)
    # monotonic decreasing a > b > c: minimum at the right end
    return False, 2, min(a - b, b - c)
2666 2667
2668 -def _sample_array_interior(xdata, pc, refine=0):
2669 """xdata is a 1D array. 2670 """ 2671 # Consider moving into Interval class as a method... 2672 all_xs = [] 2673 if refine > 0: 2674 dxs = xdata[1:] - xdata[:-1] 2675 refine_dxs = 1/(1.0+refine) * dxs 2676 for i, x in enumerate(xdata[:-1]): 2677 all_xs.extend(list(x+refine_dxs[i]*arange(0, refine+1))) 2678 all_xs.append(xdata[-1]) 2679 xs = np.array(all_xs) 2680 else: 2681 xs = xdata.copy() 2682 xs[0] += pc*(xs[1] - xs[0]) 2683 xs[-1] -= pc*(xs[-1] - xs[-2]) 2684 return xs
2685 2686
def closest_perp_distance_between_splines(NullcA, NullcB, dist_delta_tol=1e-5,
                                          strict=True):
    """Measure closest perpendicular distance between two nullcline objects
    (via their spline representations).

    Tolerance argument measures that of Euclidean distance between the splines
    representing the two nullclines.

    strict option (default True) ensures both nullclines are monotonically
    increasing or decreasing functions of x. If False, only nullcline A must be,
    while nullcline B non-monotonicity will simply lead to a warning message
    (since B plays an asymmetric role to A in the calculation, its monotonicity
    is less critical).

    Assumes monotonicity of both nullclines, which may be relaxed in future
    versions.

    Returns a pair (x, d): the x position on nullcline A achieving the smallest
    perpendicular distance found, and that distance.
    """
    assert NullcA.is_monotonic(), "Nullcline A must be monotonic"
    Bmon = NullcB.is_monotonic()
    if strict:
        assert Bmon, "Nullcline B must be monotonic"
    else:
        if not Bmon:
            print "Warning: nullcline B is not monotonic in closest_perp_distance_between splines"
    # search interior sample points of spline A first, and 1% inside endpoints
    xa_search_vals = _sample_array_interior(NullcA.array[:,0], 0.01)
    # coarse scan: distance from each sample of A to B
    dists = [closest_perp_distance_on_spline(NullcA, NullcB, xa) for xa in xa_search_vals]

    # choose two starting indices (and a provisional minimum index xa_ix)
    # according to the coarse shape of the distance profile
    if isincreasing(dists):
        # distance grows with x: minimum is at the left end
        xa_start_ix1 = 0
        xa_start_ix2 = 1
        xa_ix = 0
    elif isincreasing(dists[::-1]):
        # distance shrinks with x: minimum is at the right end
        xa_start_ix1 = len(dists)-2
        xa_start_ix2 = len(dists)-1
        xa_ix = len(dists)-1
    else:
        # interior minimum somewhere: take smallest and second-smallest samples
        xa_ix = np.argmin(dists) # used later too to recover associated xa value used
        xa_start_ix1 = xa_ix ##+ 1 # ignored spline endpoint
        other_dists = dists[:]
        other_dists[xa_ix] = np.Inf
        if np.any( np.isfinite( np.array(other_dists) )):
            xa_start_ix2 = np.argmin(other_dists) ##+ 1
            if xa_start_ix2 < xa_start_ix1:
                # switch
                xa_start_ix1, xa_start_ix2 = (xa_start_ix2, xa_start_ix1)
        else:
            # both ix1 and ix2 would be the same, so extend search over next
            # neighboring indices, if available
            xa_start_ix2 = min([len(dists), xa_start_ix1 + 1])
            xa_start_ix1 = max([0,xa_start_ix1 - 1])

    # Minimization method from xa over given indices in NullcA.array[ix1:ix2,0]
    # except if either is an endpoint, in which case move in until distance to
    # NullcB becomes well-defined
    xa_lo = xa_search_vals[xa_start_ix1]
    d_lo = dists[xa_start_ix1]
    xa_hi = xa_search_vals[xa_start_ix2]
    d_hi = dists[xa_start_ix2]

    # Old code in case there was a problem proceeding using endpoints
##    if xa_start_ix1 > 0:
##        xa_lo = xa_search_vals[xa_start_ix1]
##        d_lo = dists[xa_start_ix1]
##    else:
##        raise NotImplementedError
##
##    if xa_start_ix2 < len(NullcA.array):
##        xa_hi = xa_search_vals[xa_start_ix2]
##        d_hi = dists[xa_start_ix2]
##    else:
##        raise NotImplementedError

    assert xa_hi > xa_lo

    # given sufficient number of sample points for nullclines...
    # concavity test for NullcA: shortest dist either at endpoints (monotonic assumed),
    # or in interior (quadratic-like minimization).

    xa_mid = xa_search_vals[xa_ix] # may be the same as one of the endpoints
    if xa_mid in (xa_lo, xa_hi):
        # xa_mid is the same as an endpoint
        # split the difference midway
        xa_mid = 0.5*(xa_lo+xa_hi)
        d_mid = closest_perp_distance_on_spline(NullcA, NullcB, xa_mid)
    else:
        d_mid = dists[xa_ix]

    # Begin bisection-like linearly-convergent minimization (with convergence
    # guarantee due to bracketing)
    x_bracket = (xa_lo, xa_mid, xa_hi)
    d_bracket = (d_lo, d_mid, d_hi)

    is_brack, ix_min, error = is_min_bracket(d_bracket)
    #plotter.do_display = True

    while error > dist_delta_tol:
        a, b, c = x_bracket
        if is_brack:
            # already have a bracket: shrink it by probing between a and b
            b_prime = 0.5*(a + b) # pick side to the "left" (w.l.o.g.)
            d_new = closest_perp_distance_on_spline(NullcA, NullcB, b_prime)
            # test bracket 1 is xa_lo, xa_new, xa_mid
            test_bracket1 = (d_bracket[0], d_new, d_bracket[1])
            is_brack_test, ix_min, d_error = is_min_bracket(test_bracket1)
            if is_brack_test:
                # done
                x_bracket = (a, b_prime, b)
                d_bracket = test_bracket1
                is_brack = True
                error = d_error
                continue # while
            # test bracket 2 is xa_new, xa_mid, xa_hi
            test_bracket2 = (d_new, d_bracket[1], d_bracket[2])
            is_brack_test, ix_min, d_error = is_min_bracket(test_bracket2)
            if is_brack_test:
                # done
                x_bracket = (b_prime, b, c)
                d_bracket = test_bracket2
                is_brack = True
                error = d_error
                # continue while
        else:
            # true d function may be monotonic, but must try to find a better
            # intermediate point that creates a bracket.

            # assuming minimum exists for d in this range,
            # pick a point between the two smallest d's and continue search for bracket.
            if ix_min == 0:
                b_prime = 0.5*(a + b)
                d_new = closest_perp_distance_on_spline(NullcA, NullcB, b_prime)
                # does the new selection create a bracket?
                # test bracket 1 is xa_lo, xa_new, xa_mid
                d_bracket = (d_bracket[0], d_new, d_bracket[1])
                x_bracket = (a, b_prime, b)
            else:
                # ix_min == 2
                b_prime = 0.5*(b + c)
                d_new = closest_perp_distance_on_spline(NullcA, NullcB, b_prime)
                # does the new selection create a bracket?
                # test bracket 1 is xa_mid, xa_new, xa_hi
                d_bracket = (d_bracket[1], d_new, d_bracket[2])
                x_bracket = (b, b_prime, c)
            # NOTE(review): result is bound to is_brack_test, not is_brack, so
            # once bracketing fails the loop stays in this branch until the
            # error tolerance is met -- verify this is intended.
            is_brack_test, ix_min, error = is_min_bracket(d_bracket)
    return x_bracket[ix_min], d_bracket[ix_min]
2831 2832 2833 # EXPERIMENTAL CLASS
class dx_scaled_2D(object):
    """Supports a delta x vector that automatically re-scales
    according to the known scalings of each of the vector's component
    directions.

    Highly experimental class!
    """
    def __init__(self, dx, scale=(1,1)):
        # dx must be a real scalar (positivity is deliberately not enforced)
        assert isreal(dx) # and dx>0
        self.dx = dx
        assert len(scale) == 2 and scale[0] > 0 and scale[1] > 0
        biggest = max(scale)
        self.x_scale = scale[0]/biggest
        self.y_scale = scale[1]*1./scale[0]/biggest

    def __call__(self, dir):
        """Scale dir by dx, weighted by how much dir points along the y-axis.

        The weight interpolates linearly between 1 (dir purely along the
        x-axis) and y_scale (dir purely along the y-axis); a scalar argument
        is simply multiplied by dx.
        """
        try:
            # fraction in [0,1]: 0 = entirely along x-axis, 1 = entirely along
            # y-axis; gives the relative y-axis contribution of dir
            y_frac = 2*atan2(dir[1], dir[0])/pi
        except TypeError:
            # e.g. unsubscriptable (scalar) object, from a __mul__ call
            return self.dx*dir
        return self.dx*dir*((1-y_frac) + y_frac*self.y_scale)

    __mul__ = __call__

    def __rmul__(self, dir):
        try:
            # same interpolation weight as in __call__
            y_frac = 2*atan2(dir[1], dir[0])/pi
        except TypeError:
            # scalar * dx_scaled_2D: fold the scalar into a new scaled step
            return dx_scaled_2D(self.dx*dir, (1, self.y_scale))
        return self.dx*dir*((1-y_frac) + y_frac*self.y_scale)
2873 2874
def find_saddle_manifolds(fp, dx=None, dx_gamma=None, dx_perp=None, tmax=None,
                          max_len=None, ic=None,
                          ic_dx=None, max_pts=1000, directions=(1,-1),
                          which=('s', 'u'), other_pts=None, rel_scale=None,
                          dx_perp_fac=0.75, verboselevel=0, fignum=None):
    """Compute any branch of the stable or unstable sub-manifolds of a saddle.
    Accepts fixed point instances of class fixedpoint_2D.

    Required inputs:
      fp: fixed point object
      dx: arc-length step size (**fixed**)
      dx_gamma: determines the positions of the Gamma_plus and Gamma_minus
        event surfaces (can be a real scalar or a pair if not symmetric)
      dx_perp: initial perturbation from the local linear sub-manifolds to
        find starting points.
      tmax: maximum time to compute a trajectory before 'failing'
      max_len: maximum arc length to compute
      max_pts: maximum number of points to compute on each sub-manifold branch
      Specify either ic or ic_dx for initial point (e.g. to restart the calc
      after a previous failure) or a certain distance from the saddle point.

    Optional inputs:
      rel_scale: a pair giving relative scalings of x and y coordinates in
        the plane, to improve stepping in the different directions.
        e.g. (1,10) would make dx steps in the y-direction 10 times larger than
        in the x-direction.

      which: which sub-manifold to compute 's', 'u' or ('s', 'u').
        Default is both.

      directions: which directions along chosen sub-manifolds? (1,), (-1,)
        or (1,-1). Default is both.

      other_pts can be a list of points whose proximity will be checked,
      and the computation halted if they get within dx of the manifold.

      dx_perp_fac: For advanced use only. If you get failures saying dx_perp
        too small and that initial displacement did not straddle manifold, try
        increasing this factor towards 1 (default 0.75). Especially for
        unstable manifolds, initial values for dx_perp may diverge, but if
        dx_perp is shrunk too quickly with this factor the sweet spot may be
        missed.

      verboselevel
      fignum

    Returns a dictionary keyed by the entries of `which` ('s' and/or 'u'),
    each value a Pointset of manifold points parameterized by 'arc_len'.
    """

    assert fp.classification == 'saddle' and not fp.degenerate
    # order the eigen-data so that eval_s < 0 (stable) and eval_u > 0 (unstable)
    if fp.evals[0] < 0:
        eval_s = fp.evals[0]
        eval_u = fp.evals[1]
        evec_s = fp.evecs[0]
        evec_u = fp.evecs[1]
    else:
        eval_s = fp.evals[1]
        eval_u = fp.evals[0]
        evec_s = fp.evecs[1]
        evec_u = fp.evecs[0]
    gen = fp.gen
    # the generator must already carry the two Gamma detection events
    assert 'Gamma_out_plus' in gen.eventstruct, "Detection event surface(s) not present"
    assert 'Gamma_out_minus' in gen.eventstruct, "Detection event surface(s) not present"
    # Dividing fixed point's inherited epsilon tolerance by 100
    eps = fp.eps / 100
    # absolute floor below which dx_perp shrinking gives up
    dx_perp_eps = 1e-12
    if dx_perp_fac >= 1 or dx_perp_fac <= 0:
        raise ValueError("dx_perp_fac must be between 0 and 1")
    normord = fp.normord
    if rel_scale is None:
        rel_scale = (1,1)
    dxscaled = dx_scaled_2D(dx, rel_scale)
    # normalize dx_gamma to a {1: ..., -1: ...} dict for the two surfaces
    if isinstance(dx_gamma, dict):
        assert len(dx_gamma) == 2, "Invalid value for dx_gamma"
        assert remain(dx_gamma.keys(), [1,-1]) == [], \
               "Invalid value for dx_gamma"
    else:
        try:
            dx_gamma = {1: dx_gamma, -1: dx_gamma}
        except:
            raise TypeError("Invalid type for dx_gamma")

    def test_fn(x, dircode):
        """Integrate from x in direction dircode and return +1 / -1 according
        to which Gamma event surface (plus / minus) is reached first.
        Raises RuntimeError if integration fails or neither surface is hit."""
        if verboselevel>1:
            print "Test point", x[x.coordnames[0]], x[x.coordnames[1]], "in direction", dircode, "\n"
        gen.set(ics=x)
        try:
            test = gen.compute('test', dirn=dircode)
        # NOTE(review): bare except converts any error (incl. interrupts) into
        # a RuntimeError here
        except:
            raise RuntimeError("Integration failed")
        events = gen.getEvents()
        if verboselevel>1:
            pts=test.sample(coords=x.coordnames)
            plot(pts[x.coordnames[0]][:25],pts[x.coordnames[1]][:25],'b-')
        if events['Gamma_out_plus'] is None:
            if events['Gamma_out_minus'] is None:
                if verboselevel>1:
                    pts=test.sample(coords=x.coordnames)
                    print "Last computed point was\n", pts[-1]
                    print "...after time", pts['t'][-1]
                    plot(pts[x.coordnames[0]],pts[x.coordnames[1]],'b-')
                raise RuntimeError("Did not reach Gamma surfaces")
            else:
                # hit Gamma_out_minus
                if verboselevel>1:
                    print "Reached Gamma minus at t=", events['Gamma_out_minus']['t'][0]
                sgn = -1
        else:
            if events['Gamma_out_minus'] is None:
                # hit Gamma_out_plus
                if verboselevel>1:
                    print "Reached Gamma plus at t=", events['Gamma_out_plus']['t'][0]
                sgn = 1
            else:
                # hit both surfaces: ambiguous, treat as failure
                if verboselevel>1:
                    pts=test.sample(coords=x.coordnames)
                    print "Last computed point was\n", pts[-1]
                    print "...after time", pts['t'][-1]
                    plot(pts[x.coordnames[0]],pts[x.coordnames[1]],'b-')
                raise RuntimeError("Did not reach Gamma surfaces")
        return sgn

    def onto_manifold(x_ic, dn, normal_dir, dircode='f'):
        """Bisect between x_ic +/- dn*normal_dir, using test_fn's sign, to land
        a point on the manifold.  A bisection AssertionError (the two initial
        points do not straddle the manifold) is re-raised as RuntimeError."""
        try:
            return bisection(test_fn, x_ic+dn*normal_dir, x_ic-dn*normal_dir,
                             args=(dircode,), xtol=eps, maxiter=100,
                             normord=normord)
        except AssertionError:
            if verboselevel>1:
                xp = x_ic+dn*normal_dir
                xm = x_ic-dn*normal_dir
                plot(xp[var_x], xp[var_y], 'gx')
                plot(xm[var_x], xm[var_y], 'gx')
            raise RuntimeError("dx_perp too small? +/- initial displacement did not straddle manifold")
        except RuntimeError:
            if verboselevel>1:
                xp = x_ic+dn*normal_dir
                xm = x_ic-dn*normal_dir
                plot(xp[var_x], xp[var_y], 'gx')
                plot(xm[var_x], xm[var_y], 'gx')
            raise

    # make the Gamma events terminal for the duration of this computation
    gen.eventstruct['Gamma_out_plus'].activeFlag=True # terminal
    gen.eventstruct['Gamma_out_minus'].activeFlag=True # terminal
    var_x = fp.point.coordnames[0]
    var_y = fp.point.coordnames[1]
    assert tmax > 0
    manifold = {}
    man_names = {'s': 'stable', 'u': 'unstable'}

    for w in which:
        ### w = 's' => stable branch
        ### w = 'u' => unstable branch
        if verboselevel>0:
            print "Starting %s branch" % man_names[w]
        if w == 's':
            col = 'g'
            w_sgn = -1
            integ_dircode = 'f'
            evec = evec_u
            evec_other = evec_s
        elif w == 'u':
            col = 'r'
            w_sgn = 1
            integ_dircode = 'b'
            evec = evec_s
            evec_other = evec_u
        # set Gamma_out surfaces on "outgoing" branch
        # (polarity is arbitrary)
        p0_plus = fp.point + dx_gamma[1]*evec
        p0_minus = fp.point - dx_gamma[-1]*evec
        evec_perp = get_perp(evec)
        # !! Must choose the event directions correctly
        # Algorithm should be based on: event dircode = 1
        # if ev(traj(ev_t - delta)) < 0 and ev(traj(ev_t + delta)) > 0
        # where the evec along the flow towards the event surfaces
        # determines which is "before" and which is "after" the
        # event surface (time may be reversed depending on which
        # manifold is being computed)
        print "Set these event directions according to your problem..."
        gen.eventstruct.setEventDir('Gamma_out_plus', -1)
        gen.eventstruct.setEventDir('Gamma_out_minus', 1)
        # place the two Gamma surfaces (point + direction parameters)
        gen.set(pars={'Gamma_out_plus_p_'+var_x: p0_plus[var_x],
                      'Gamma_out_plus_p_'+var_y: p0_plus[var_y],
                      'Gamma_out_plus_dp_'+var_x: evec_perp[var_x],
                      'Gamma_out_plus_dp_'+var_y: evec_perp[var_y],
                      'Gamma_out_minus_p_'+var_x: p0_minus[var_x],
                      'Gamma_out_minus_p_'+var_y: p0_minus[var_y],
                      'Gamma_out_minus_dp_'+var_x: evec_perp[var_x],
                      'Gamma_out_minus_dp_'+var_y: evec_perp[var_y],
##                      'fp_'+var_x: fp.point[var_x], 'fp_'+var_y: fp.point[var_y]
                      },
                tdata = [0,tmax])
        if verboselevel>1:
            if fignum is None:
                fignum=figure()
            else:
                figure(fignum)
            # draw the two Gamma surfaces as short line segments
            plot([p0_plus[var_x]-dxscaled*evec_perp[var_x],p0_plus[var_x]+dxscaled*evec_perp[var_x]],
                 [p0_plus[var_y]-dxscaled*evec_perp[var_y],p0_plus[var_y]+dxscaled*evec_perp[var_y]], 'k-', linewidth=2)
            plot([p0_minus[var_x]-dxscaled*evec_perp[var_x],p0_minus[var_x]+dxscaled*evec_perp[var_x]],
                 [p0_minus[var_y]-dxscaled*evec_perp[var_y],p0_minus[var_y]+dxscaled*evec_perp[var_y]], 'k-', linewidth=2)
            draw()
        check_other_pts = other_pts is not None
        # piece maps signed arc length -> manifold point
        piece = {}
        if ic_dx is None:
            ic_dx = dxscaled
        else:
            ic_dx = dx_scaled_2D(ic_dx, rel_scale)
        if ic is None:
            ic = fp.point
            f_ic = -w_sgn * evec_other
            curve_len = 0
            # initial estimate x0 = a point close to f.p. along manifold with
            # opposite stability
        else:
            # initial curve length from previous independent variable, if present
            # otherwise, assume zero
            if isinstance(ic, Pointset):
                assert len(ic) == 1, "Only pass a length-1 pointset"
                # guarantee curve_len > 0
                curve_len = abs(ic['arc_len'][0])
                ic = ic[0]
            else:
                curve_len = 0
            f_ic = -w_sgn * gen.Rhs(0, ic, gen.pars) # array
        for sgn in directions:
            if verboselevel>0:
                print "Starting direction", sgn
            # PREDICTION
            x0_ic = ic+w_sgn*sgn*ic_dx*f_ic/norm(f_ic, normord)
            if verboselevel>1:
                figure(fignum)
                plot(x0_ic[var_x], x0_ic[var_y], 'go', linewidth=1)
            # put x0 initial estimate onto stable manifold
            f = -w_sgn * gen.Rhs(0, x0_ic, gen.pars) # array
            norm_to_flow = get_perp(f/norm(f, normord))
            if verboselevel>1:
                plot([x0_ic[var_x], x0_ic[var_x]+dxscaled*f[0]/norm(f,normord)],
                     [x0_ic[var_y], x0_ic[var_y]+dxscaled*f[1]/norm(f,normord)],
                     'r-')
                plot([x0_ic[var_x], x0_ic[var_x]+dxscaled*norm_to_flow[0]],
                     [x0_ic[var_y], x0_ic[var_y]+dxscaled*norm_to_flow[1]],
                     'r:')
            dx_perp_default = dx_perp
            # CORRECTION: shrink dx_perp until bisection straddles the manifold
            while dx_perp > dx_perp_eps:
                try:
                    x = onto_manifold(x0_ic, dx_perp, norm_to_flow,
                                      dircode=integ_dircode)
                except RuntimeError, e:
                    dx_perp *= dx_perp_fac
                else:
                    break
            if dx_perp <= dx_perp_eps:
                # RuntimeError was raised and could not continue reducing dx_perp
                print "dx_perp reached lower tolerance =", dx_perp_eps
                print e
                raise RuntimeError("Initial point did not converge")
            else:
                curve_len += norm(x-ic, normord)
                piece[sgn*curve_len] = x
                num_pts = 1
                last_x = x
                if verboselevel>0:
                    print "Initial point converged to (%.6f, %.6f)\n" % \
                          (x[var_x], x[var_y])
            dx_perp = dx_perp_default
            last_f = f_ic
            # step backwards along flow
            while curve_len < max_len and num_pts < max_pts:
                if verboselevel>0:
                    figure(fignum)
                    plot(last_x[var_x], last_x[var_y], col+'.', linewidth=1)
                if check_other_pts and sometrue([norm(last_x - pt, normord) < dx \
                                     for pt in other_pts]):
                    # we've hit a different fixed point (or other feature), so stop
                    break
                f = -w_sgn * gen.Rhs(0, last_x, gen.pars)
                if all(sign(f) != sign(last_f)):
                    f = -f
                    # on other side of manifold so must keep stepping in the
                    # same direction, therefore switch signs!
                # PREDICTION
                x_ic = last_x + w_sgn*sgn*dxscaled*f/norm(f,normord)
                last_f = f
                if verboselevel>1:
                    print "\nStarting from point ", last_x
                    delta = w_sgn*sgn*dxscaled*f/norm(f,normord)
                    print "Trying point ", x_ic, "in direction (%.6f, %.6f)\n" % (delta[0], delta[1])
                dx_perp = dx_perp_default
                # CORRECTION
                # NOTE(review): the shrink factor here is the literal 0.75,
                # not dx_perp_fac as in the initial-point loop -- confirm
                # whether that is intended.
                while dx_perp > dx_perp_eps:
                    try:
                        x = onto_manifold(x_ic, dx_perp, get_perp(f/norm(f,normord)),
                                          dircode=integ_dircode)
                    except RuntimeError, e:
                        dx_perp *= 0.75
                    else:
                        break
                if dx_perp <= dx_perp_eps:
                    # RuntimeError was raised and could not continue reducing dx_perp
                    print "dx_perp reached lower tolerance =", dx_perp_eps
                    print e
                    break # end while search
                else:
                    curve_len += norm(x-last_x, normord)
                    piece[sgn*curve_len] = x
                    last_x = x
                    num_pts += 1
                    if verboselevel>1:
                        print "\nManifold has %i points" % num_pts
                    elif verboselevel>0:
                        print ".",
                        sys.stdout.flush()
        if verboselevel>0:
            # finish the line
            print " "
        # assemble this branch's points, ordered by signed arc length
        indepvar, piece_sorted = sortedDictLists(piece, byvalue=False)
        manifold[w] = pointsToPointset(piece_sorted, indepvarname='arc_len',
                                       indepvararray=indepvar, norm=normord)
    # restore event flags before returning
    gen.eventstruct['Gamma_out_plus'].activeFlag=False
    gen.eventstruct['Gamma_out_minus'].activeFlag=False
##    gen.eventstruct['fp_closest'].activeFlag=False

    return manifold
def make_flow_normal_event(x, y, dxdt, dydt, targetlang, flatspec=None,
                           fnspec=None, evtArgs=None):
    """For 2D vector fields only.

    Supply flatspec if built vector field using ModelConstructor tools,
    otherwise specify funcspec argument.

    Returns a pair (event, helper-dict): a zero-crossing event that fires
    when the flow is normal to the vector from the current state to the
    reference point given by the event's parameters, plus a dict mapping
    those parameter names back to the variable names.
    """
    # event parameters holding the reference point coordinates
    x_par = x+'_normflow_p'
    y_par = y+'_normflow_p'

    if fnspec is None:
        if flatspec is None:
            raise ValueError("Must supply one of funcspec or flatspec")
        varnames = []
        parnames = []
        inputnames = []
        fnspecs = {}
    else:
        if flatspec is not None:
            raise ValueError("Must supply only one of funcspec or flatspec")
        # pull each optional component out of fnspec, defaulting to empty
        def _grab(key, default):
            try:
                return fnspec[key]
            except KeyError:
                return default
        varnames = _grab('vars', [])
        parnames = _grab('pars', [])
        inputnames = _grab('inputs', [])
        fnspecs = _grab('auxfns', {})
    if evtArgs is None:
        evtArgs = {'name': 'flow_normal_2D_evt',
                   'eventtol': 1e-5,
                   'eventdelay': 1e-4,
                   'starttime': 0,
                   'precise': True,
                   'term': False}
    else:
        # force these two entries regardless of the caller's settings
        # (mutates the caller's dict, as before)
        evtArgs['term'] = False
        evtArgs['name'] = 'flow_normal_2D_evt'
    # <p - v, f(v)> normalized by |p| + |f|: zero when flow is normal to p - v
    v_dot_f = '(%s - %s) * (%s) + (%s - %s) * (%s)' % (x_par, x, dxdt,
                                                       y_par, y, dydt)
    norm_v = 'sqrt(%s*%s+%s*%s)' % (x_par, x_par, y_par, y_par)
    norm_f = 'sqrt(pow((%s),2) + pow((%s),2))' % (dydt, dxdt)
    flow_n_str = '%s/(%s+%s)' % (v_dot_f, norm_v, norm_f)
    ev = Events.makeZeroCrossEvent(expr=flow_n_str,
                                   dircode=0,
                                   argDict=evtArgs,
                                   targetlang=targetlang,
                                   varnames=varnames,
                                   parnames=parnames+[x_par,y_par],
                                   inputnames=inputnames, fnspecs=fnspecs,
                                   flatspec=flatspec)
    ev_helper = {'event_names': {2:'flow_normal_2D_evt'}, # 2 for 2D
                 'pars_to_vars': {x_par:x, y_par:y}}
    return (ev, ev_helper)
3263 3264
class phaseplane(object):
    """Working environment for 2D phase-plane analysis.

    Effectively a thin wrapper around a Generator class
    (will later be around a Model class, in order to support hybrid
    2D vector fields)
    """
    def __init__(self, vf_info, x_axis='',
                 name='', normord=2, eps=1e-12):
        """Use x_axis to specify which variable should appear on the x-axis.

        vf_info -- object exposing ODEclass (an ODEsystem subclass) and
            ODEargs (its build arguments, including a 2-element 'vars' list)
        normord -- positive integer norm order for distance measures
        eps -- positive real tolerance
        """
        assert isreal(eps) and eps > 0, "eps tolerance must be a positive real number"
        self.eps = eps
        assert isinstance(normord, int) and normord > 0, "normord must be a positive integer"
        self.normord = normord
        self.name = name
        try:
            # BUG FIX: ODEclass is a class object (it is instantiated via
            # __new__ in build_vf), so the correct test is subclass-hood;
            # the original isinstance test could never pass for a class.
            assert isinstance(vf_info.ODEclass, type) and \
                   issubclass(vf_info.ODEclass, ODEsystem), \
                   "Can only use this class with ODE system Generators"
            # Should make this compatible with ModelSpec construction too
            assert len(vf_info.ODEargs.vars)==2, "Only 2D systems permitted for phase planes"
        except AttributeError:
            raise TypeError("Invalid form given for vf_info")
        self.vf_info = vf_info
        # store copy of the actual vector definition to make sure this doesn't
        # change when things are added or removed and the vf is rebuilt --
        # otherwise all self.objects will be invalidated
        self._vf_core_copy = copy.copy(vf_info.ODEargs)
        if x_axis != '':
            assert x_axis in vf_info.ODEargs.vars
            self.x_axis = x_axis
            self.y_axis = remain(vf_info.ODEargs.vars, x_axis)[0]
        else:
            self.x_axis, self.y_axis = vf_info.ODEargs.vars
        try:
            self.build_vf()
        except ValueError:
            # leave self.vf unset until valid build parameters are supplied
            self.vf = None
        self.objects = args(fixedpoints=None,nullclines=None,limitcycles=None,
                            submanifolds=None,trajectories=None,specialpoints=None)

    def build_vf(self):
        """(Re)build the vector field Generator from self.vf_info.

        Raises ValueError if the stored build information is invalid or
        incomplete.
        """
        # Later, wrap generator into a Model object using embed()
        # Verify that self._vf_info.ODEargs has same core attributes as
        # self._vf_core_copy, otherwise raise an exception
        try:
            self.vf = self.vf_info.ODEclass.__new__(self.vf_info.ODEclass,
                                                    self.vf_info.ODEargs)
        except Exception:
            # narrowed from a bare except so KeyboardInterrupt/SystemExit
            # are not swallowed
            raise ValueError("Invalid or incomplete parameters for building vector field Generator")

    def add(self, attribute, value):
        """Set an attribute of the stored vector field definition."""
        try:
            # NOTE(review): indexing vf_info['args'] here vs. attribute access
            # (vf_info.ODEargs) in __init__ -- confirm both are supported by
            # the vf_info container
            setattr(self.vf_info['args'], attribute, value)
        except Exception:
            raise ValueError("Invalid vector field attribute %s or value %s"%(attribute,str(value)))

    def remove(self, attribute, value):
        """Delete one value from an attribute of the vector field definition."""
        try:
            del self.vf_info['args'][attribute][value]
        except Exception:
            raise ValueError("Could not delete vector field value %s of attribute %s"%(str(value), attribute))
3328 3329 3330
3331 -def find_period(pts, thresh, dir=1, with_indices=False):
3332 """pts is a Pointset. 3333 3334 thresh is a 1D Point or a dictionary, whose coordinate name corresponds 3335 to a variable in the pts pointset. 3336 3337 dir is either 1 or -1, indicating which direction the threshold must be 3338 crossed to be counted (default 1 = increasing). 3339 3340 with_indices is a Boolean indicating whether to return the pair of indices 3341 indicating the beginning and end points of the last period (default False). 3342 """ 3343 try: 3344 threshdict = dict(thresh) 3345 except: 3346 raise TypeError("thresh must be a 1D Point object or dictionary") 3347 assert len(threshdict)==1, "thresh must be a 1D Point object or dictionary" 3348 var = thresh.keys()[0] 3349 a = pts[var] 3350 t = pts['t'] 3351 ts = [] 3352 ixs = [] 3353 th = thresh[var] 3354 # if don't start on appropriate side of thresh, wait 3355 def inc_fn(x): 3356 return x < th
3357 def dec_fn(x): 3358 return x > th 3359 if dir == 1: 3360 off_fn = inc_fn 3361 elif dir == -1: 3362 off_fn = dec_fn 3363 else: 3364 raise ValueError("dir must be 1 or -1") 3365 off = off_fn(a[0]) 3366 for i,av in enumerate(a): 3367 if off and not off_fn(av): 3368 ts.append(t[i]) 3369 ixs.append(i) 3370 off = False 3371 ## print "Detected at i=%d, a=%f"%(i,av) 3372 ## print ts 3373 elif not off and off_fn(av): 3374 off = True 3375 if len(ts) >= 1: 3376 if with_indices: 3377 return ts[-1]-ts[-2], ixs 3378 else: 3379 return ts[-1]-ts[-2] 3380 else: 3381 print len(ts), "is not enough periods", 3382 return NaN 3383 3384 3385 # --------------------------------------------------------------- 3386
def _filter_consecutive(indices, minimize_values=None):
    """Return index array containing no consecutive values.
    if minimize_values option is given, this sequence must contain positions for
    all indices in the first argument, and will be used to choose which of any
    consecutive indices to retain in the returned sequence. Otherwise, the first
    index will be chosen arbitrarily.

    E.g. to find the *last* index of every cluster instead of the default, use
    minimize_values=range(max_index,0,-1)
    """
    ## Find clusters
    # Runs of consecutive integers share a constant (position - value) under
    # enumerate, so groupby on that key splits indices into clusters.
    # (Python 2 tuple-unpacking lambda syntax.)
    clusters = [map(operator.itemgetter(1), g) for k, g in \
                itertools.groupby(enumerate(indices), lambda (i,x):i-x)]
    ## Minimize on clusters
    result = []
    for c in clusters:
        if len(c) == 1:
            # singleton cluster: keep it as-is
            result.append(c[0])
        else:
            if minimize_values is None:
                # no tie-break data given: keep the first index of the run
                result.append( c[0] )
            else:
                mvals = np.array(minimize_values)
                # position (within the run) of the smallest associated value;
                # argmin over the flattened 1xN array gives the in-run offset
                representative = np.argmin(np.array([mvals[c]]))
                result.append( c[representative] )
    return np.array(result)
3413 3414 3415 # Feature sub-classes
class zone_node(ql_feature_node):
    """Phase plane 'zone' node of hierarchical qualitative feature abstract class.
    A zone is a bounding box in the phase plane containing a feature, grown to
    include a region that satisfies some local condition.

    Must define _leaf_class for each implementation of a zone_node class
    """
    def finish(self, target):
        # Build one leaf feature per detected zone center index and apply it
        # to the target immediately.
        self.subfeatures = {}
        for center_ix in self.results.zone_center_ixs:
            leaf = self._leaf_class('zone_%i'%center_ix, pars=self.pars)
            leaf.pars.all_zone_ixs = list(self.results.zone_center_ixs)
            leaf.pars.index = center_ix
            self.subfeatures[center_ix] = leaf
            leaf(target)

    def _local_init(self):
        # default refinement level when the user did not supply one
        self.pars.refine = getattr(self.pars, 'refine', 0)
3435 3436
class zone_leaf(ql_feature_leaf):
    """Phase plane 'zone' for leaf of hierarchical qualitative feature abstract class.
    A zone is a bounding box in the phase plane containing a feature, grown to include
    a region that satisfies some local condition.
    """
    def evaluate(self, target):
        # Default implementation records no zones; concrete subclasses
        # compute self.results.zones.
        self.results.zones = None

    def finish(self, target):
        """Override this function to be called if evaluate returns True"""
        pass
3448 3449 3450 # Abstract classes
class nullcline_zone_node(zone_node):
    """Abstract node class for zones defined along nullclines.

    Concrete subclasses must define _leaf_class (see zone_node).
    """
    pass
3453 3454
class nullcline_zone_leaf(zone_leaf):
    """Parameters to apply:
    pars: xtol, refine (integer, default 0)
    optional pars: find_exact_center (unused)
    """
    def _prepare(self, nullc):
        # Resolve this zone's center index into (index, x, y) coordinates on
        # the nullcline's sample array.
        center_ix = self.pars.index
        x_center_approx, y_center_approx = nullc.array[center_ix]
        # Do we care what is the exact position of the zone's defining position?
        try:
            exact = self.pars.find_exact_center
        except AttributeError:
            # default False
            exact = False
        if exact:
            raise NotImplementedError
        else:
            x_center = x_center_approx
            y_center = y_center_approx
        return center_ix, x_center, y_center

    def _set_mutex_zone_by_width(self, center_ix, x_center, y_center, nullc):
        """Fix zone size by width or halfway to next center, if closer."""
        try:
            zone_width = self.pars.zone_width
        except AttributeError:
            raise ValueError("Must set a zone_width parameter for this feature")
        # widest extent the zone may have on either side of its center
        min_zone_x = x_center - zone_width
        max_zone_x = x_center + zone_width
        x_range = [np.nan, np.nan]
        y_range = [np.nan, np.nan]
        # position of this zone among all zone centers (to find neighbors)
        zone_position = self.pars.all_zone_ixs.index(center_ix)
        if zone_position == 0:
            # everything to the left is already included
            x_range[0] = max( nullc.array[0,0], min_zone_x )
        else:
            # halfway to the previous center, unless zone_width is smaller
            ix = self.pars.all_zone_ixs[zone_position-1]
            x_range[0] = max( 0.5*(nullc.array[ix,0]+x_center), min_zone_x )
        y_range[0] = nullc(x_range[0])

        if center_ix == self.pars.all_zone_ixs[-1]:
            # everything to the right is already included
            x_range[1] = min( nullc.array[-1,0], max_zone_x )
        else:
            # halfway to the next center, unless zone_width is smaller
            ix = self.pars.all_zone_ixs[zone_position+1]
            x_range[1] = min( 0.5*(nullc.array[ix,0]+x_center), max_zone_x )
        y_range[1] = nullc(x_range[1])
        return [np.array(x_range), np.array(y_range)]

    def _grow_zone_by_radius(self, center_ix, x_center, y_center, nullc, property_func):
        # Not yet supported -- only width-based growth is implemented.
        raise NotImplementedError

    def _grow_zone_by_width(self, center_ix, x_center, y_center, nullc, property_func):
        """property_func must return +/- 1 or 0/1 as a function of x"""
        try:
            zone_width = self.pars.zone_width
        except AttributeError:
            zone_width = np.inf # may fail unexpectedly
            print "Warning: zone_width parameter defaulted to infinity"
        min_zone_x = x_center - zone_width
        max_zone_x = x_center + zone_width
        # Grow domain of zone past known sample points with same property
        # until intermediate position between sample points found
        x_range = [np.nan, np.nan]
        y_range = [np.nan, np.nan]
        zone_position = self.pars.all_zone_ixs.index(center_ix)

        # Left search
        if zone_position == 0:
            # everything to the left is already included
            x_range[0] = max( nullc.array[0,0], min_zone_x )
            y_range[0] = nullc(x_range[0])
        else:
            # all sample points with indices ix_after:center_ix (inclusive)
            # have the same property.
            ix_before = self.pars.all_zone_ixs[zone_position-1]
            ix_after = ix_before + 1
            if nullc.array[ix_after,0] < min_zone_x:
                # zone_width limit reached before the property boundary
                x_range[0] = min_zone_x
                y_range[0] = nullc(x_range[0])
            else:
                # search between these points on the nullcline
                xsol = bisection(property_func, nullc.array[ix_before,0],
                                 nullc.array[ix_after,0], xtol=self.pars.xtol)
                if abs(xsol - nullc.array[ix_before,0]) < self.pars.xtol:
                    # corner case where zero is right at a sample point
                    # try again with extended sample points, where available
                    if ix_before > 0:
                        xsol = bisection(property_func, nullc.array[ix_before-1,0],
                                         nullc.array[ix_after,0], xtol=self.pars.xtol)
                elif abs(xsol - nullc.array[ix_after,0]) < self.pars.xtol:
                    # corner case where zero is right at a sample point
                    # try again with extended sample points, where available
                    if ix_after < len(nullc.array) - 1:
                        xsol = bisection(property_func, nullc.array[ix_before,0],
                                         nullc.array[ix_after+1,0], xtol=self.pars.xtol)
                x_range[0] = xsol
                y_range[0] = nullc(xsol)

        # Right search
        if center_ix == self.pars.all_zone_ixs[-1]:
            # everything to the right is already included
            # NOTE(review): unlike the left search and
            # _set_mutex_zone_by_width, this endpoint is not clamped by
            # max_zone_x -- confirm whether that asymmetry is intended.
            x_range[1] = nullc.array[-1,0]
            y_range[1] = nullc.array[-1,1]
        else:
            # all sample points with indices center_ix:ix_before (inclusive)
            # have the same property.
            ix_before = self.pars.all_zone_ixs[zone_position+1]
            ix_after = ix_before + 1
            if nullc.array[ix_before,0] > max_zone_x:
                # zone_width limit reached before the property boundary
                x_range[1] = max_zone_x
                y_range[1] = nullc(x_range[1])
            else:
                # search between these points on the nullcline
                xsol = bisection(property_func, nullc.array[ix_before,0],
                                 nullc.array[ix_after,0], xtol=self.pars.xtol)
                if abs(xsol - nullc.array[ix_before,0]) < self.pars.xtol:
                    # corner case where zero is right at a sample point
                    # try again with extended sample points, where available
                    if ix_before > 0:
                        xsol = bisection(property_func, nullc.array[ix_before-1,0],
                                         nullc.array[ix_after,0], xtol=self.pars.xtol)
                elif abs(xsol - nullc.array[ix_after,0]) < self.pars.xtol:
                    # corner case where zero is right at a sample point
                    # try again with extended sample points, where available
                    if ix_after < len(nullc.array) - 1:
                        xsol = bisection(property_func, nullc.array[ix_before,0],
                                         nullc.array[ix_after+1,0], xtol=self.pars.xtol)
                x_range[1] = xsol
                y_range[1] = nullc(xsol)

        return [np.array(x_range), np.array(y_range)]
3588 3589 3590 # Implementations
class fixedpoint_zone(nullcline_zone_leaf):
    """Zone associated with a fixed point.

    Needs *two* nullcline objects and a fixed point object.
    Placeholder: no implementation yet.
    """
    pass
3594 3595 3596 # define prior to node so that can provide leaf class name to node class
class inflection_zone_leaf(nullcline_zone_leaf):
    """A single inflection point zone.

    pars:
      xtol --> precision of zone center finding
      find_exact -> exact x value of inflection (not implemented yet)
      zone_width -> positive scalar (default Inf grows to next inflection point)
    """
    # Only use one of zone_radius or zone_width, otherwise zone_width will be used.
    def evaluate(self, nullc):
        """Locate the inflection zone on nullcline `nullc`, record its
        center and extent in self.results, and return True.
        """
        # An inflection index marks the last known sample point whose
        # concavity matches that of the points locally on its left
        # (points ordered left to right).
        center_ix, center_x, center_y = self._prepare(nullc)
        self.results.x_center = center_x
        self.results.y_center = center_y
        # grow the zone around the center until the concavity changes
        zone = self._grow_zone_by_width(center_ix, center_x, center_y,
                                        nullc, nullc.concavity)
        self.results.zone = zone
        # reaching this point means a zone was successfully found
        return True
3617 3618
class inflection_zone_node(nullcline_zone_node):
    """Find all inflection point zones
    """
    # leaf class instantiated by the node for each zone center found
    _leaf_class = inflection_zone_leaf

    def evaluate(self, nullc):
        """Assumes at least 2 sample points in nullcline.

        Records the sample-point indices of all concavity sign changes in
        self.results.zone_center_ixs and returns True if any were found.
        """
        # Find all approximate places (sample points) where
        # inflections occur.
        # Concavities are -1, 0, 1 but we don't care about the signs
        # so much as their changes. To this end, replace all zeros
        # with their previous neighbor's value
        xarray = nullc.sample_x_domain(refine=self.pars.refine)
        concs = nullc.concavity(xarray)
        conc_zeros = concs == 0
        while np.sometrue(conc_zeros):
            concs_list = list(concs)
            # find lowest index with a zero
            zero_ix = concs_list.index(0)
            if zero_ix > 0:
                concs[zero_ix] = concs[zero_ix-1]
            else:
                # replace with next non-zero value
                try:
                    pos_ix = concs_list.index(1)
                except ValueError:
                    # no +1's
                    pos_ix = np.inf
                try:
                    neg_ix = concs_list.index(-1)
                except ValueError:
                    # no -1's
                    neg_ix = np.inf
                min_ix = min( pos_ix, neg_ix )
                if isfinite(min_ix):
                    concs[0] = concs[min_ix]
                else:
                    # whole array was zero: choose +1 arbitrarily
                    concs[0] = 1
            # one zero is filled in per iteration, so this terminates
            conc_zeros = concs == 0
        # ignore changes within a single sample point of the endpoints
        # only -1 indicate. The extra -1 subtraction ensures all the +1's
        # become zero so that argwhere can discount those locations.
        zone_center_ixs = np.argwhere(concs[:-1] * concs[1:] - 1).flatten()
        # restore to native x array of nullcline (in case of refinement)
        zone_center_ixs = np.unique(nullc.array[:,0].searchsorted(xarray[zone_center_ixs]))
        self.results.zone_center_ixs = _filter_consecutive(zone_center_ixs,
                                             minimize_values=concs)
        return len(self.results.zone_center_ixs) > 0
3668 3669 3670
class max_curvature_zone_leaf(nullcline_zone_leaf):
    """A single zone of locally maximal curvature

    pars:
      xtol --> precision of zone center finding
      direction (currently ignored -- looks for both)
      min_curvature_pc --> percentage of the maximum curvature found at sample
        points for which a local minimum value of curvature must meet to be
        considered a zone.
      find_exact -> exact x value of max curvature (not implemented yet)
      zone_width -> positive scalar (default Inf grows to next local maximum)
    """
    # Only use one of zone_radius or zone_width, otherwise zone_width will be used.
    def evaluate(self, nullc):
        """Locate the maximal-curvature zone on `nullc`, record its center
        and extent in self.results, and return True.
        """
        # The center index marks the last known sample point sharing the
        # curvature-gradient sign of the points locally on its left
        # (points ordered left to right).
        ctr_ix, ctr_x, ctr_y = self._prepare(nullc)
        self.results.x_center = ctr_x
        self.results.y_center = ctr_y
        self.results.zone = self._set_mutex_zone_by_width(ctr_ix, ctr_x,
                                                          ctr_y, nullc)
        # reaching this point means a zone was successfully found
        return True
3695 3696
class max_curvature_zone_node(nullcline_zone_node):
    """Find all zones with locally maximal curvature.
    Optional selection of those facing up or down only:
      par: direction (-1, 0, 1, where 0 selects either (default))
    """
    # leaf class instantiated by the node for each zone center found
    _leaf_class = max_curvature_zone_leaf

    def evaluate(self, nullc):
        # Records the sample-point indices of all sufficiently large local
        # curvature maxima in self.results.zone_center_ixs; returns True
        # if any were found.
        try:
            dirn = self.pars.direction
        except AttributeError:
            dirn = 0   # both
        xarray = nullc.sample_x_domain(refine=self.pars.refine)
        gcurvature = nullc.grad_curvature(xarray)
        # find local maxima, including endpoint maxima
        # TEMP: don't look at endpoints for now
        # inside endpoints, look for where d/dx( curvature ) = 0
        # look for sign changes, just like concavity signs
        # for inflection feature
        gsigns = np.sign( gcurvature )
        gsign_zeros = gsigns == 0
        while np.sometrue(gsign_zeros):
            gsigns_list = list(gsigns)
            # find lowest index with a zero
            zero_ix = gsigns_list.index(0)
            if zero_ix > 0:
                gsigns[zero_ix] = gsigns[zero_ix-1]
            else:
                # replace with next non-zero value
                try:
                    pos_ix = gsigns_list.index(1)
                except ValueError:
                    # no +1's
                    pos_ix = np.inf
                try:
                    neg_ix = gsigns_list.index(-1)
                except ValueError:
                    # no -1's
                    neg_ix = np.inf
                min_ix = min( pos_ix, neg_ix )
                if isfinite(min_ix):
                    gsigns[0] = gsigns[min_ix]
                else:
                    # whole array was zero: choose +1 arbitrarily
                    gsigns[0] = 1
            # one zero is filled in per iteration, so this terminates
            gsign_zeros = gsigns == 0
        # ignore changes within a single sample point of the endpoints
        # only -1 indicate. The extra -1 subtraction ensures all the +1's
        # become zero so that argwhere can discount those locations.
        candidate_ixs = np.argwhere(gsigns[:-1] * gsigns[1:] - 1).flatten()
        curvature = nullc.curvature(xarray)
        # select the comparison used to accept a candidate, according to
        # the requested direction
        if dirn == 0:
            max_curvature = max(abs(curvature))
            def test(c, thresh):
                return abs(c) > thresh
        elif dirn == -1:
            # concave down only
            # NOTE(review): max_curvature is <= 0 here, so the threshold is
            # also <= 0 and `c < thresh` selects sufficiently negative
            # curvature -- confirm min_curvature_pc semantics for this case
            max_curvature = min([0, min(curvature)])
            def test(c, thresh):
                return c < thresh
        elif dirn == 1:
            # concave up only
            max_curvature = max([0, max(curvature)])
            def test(c, thresh):
                return c > thresh
        else:
            raise ValueError("Invalid direction given")
        try:
            min_curvature_pc = self.pars.min_curvature_pc
        except AttributeError:
            min_curvature_pc = 0
        curvature_thresh = min_curvature_pc*max_curvature
        zone_center_ixs = [ix for ix in candidate_ixs if \
                           test(curvature[ix], curvature_thresh)]
        # restore to native x array of nullcline (in case of refinement)
        zone_center_ixs = np.unique(nullc.array[:,0].searchsorted(xarray[zone_center_ixs]))
        self.results.zone_center_ixs = _filter_consecutive(zone_center_ixs,
                                             minimize_values=curvature)
        return len(self.results.zone_center_ixs) > 0


# NOT YET DEFINED
class min_curvature_zone(nullcline_zone_leaf):
    """Zone of locally minimal curvature. Placeholder: no implementation yet."""
    pass
3780 3781 3782 3783 # --------------------------------------------------------------- 3784 # PLOTTING and ANIMATION TOOLS 3785
def plot_PP_fps(fps, coords=None, do_evecs=False, markersize=10):
    """Draw 2D list of fixed points (singletons allowed), must be
    fixedpoint_2D objects.

    Optional do_evecs (default False) draws eigenvectors around each f.p.
    NOTE(review): do_evecs and coords are currently unused in the body.

    Marker color encodes stability: white = unstable ('u'),
    cyan = center ('c'), black = stable ('s').

    Requires matplotlib
    """
    if isinstance(fps, fixedpoint_2D):
        # singleton passed
        fps = [fps]

    x, y = fps[0].fp_coords
    for fp in fps:
        # When matplotlib implements half-full circle markers
        #if fp.classification == 'saddle':
        # half-open circle
        if fp.stability == 'u':
            style = 'wo'
        elif fp.stability == 'c':
            style = 'co'
        else: # 's'
            style = 'ko'
        # use this module's explicit pyplot alias `pp` rather than the
        # star-imported `plt` name, for consistency with get_PP
        pp.plot(fp.point[x], fp.point[y], style, markersize=markersize, mew=2)
3810
def plot_PP_vf(gen, xname, yname, N=20, subdomain=None, scale_exp=0):
    """Draw 2D vector field in (xname, yname) coordinates of given Generator,
    sampling on a uniform grid of N by N points.

    Optional subdomain dictionary specifies axes limits in each variable,
    otherwise Generator's xdomain attribute will be used.

    For systems of dimension > 2, the non-phase plane variables will be held
    constant at their initial condition values set in the Generator.

    Optional scale_exp is an exponent (domain is all reals) which rescales
    size of arrows in case of disparate scales in the vector field. Larger
    values of scale magnify the arrow sizes. For stiff vector fields, values
    from -3 to 3 may be necessary to resolve arrows in certain regions.

    Requires matplotlib 0.99 or later
    """
    assert N > 1
    xdom = gen.xdomain[xname]
    ydom = gen.xdomain[yname]
    if subdomain is not None:
        # only override the domains actually present in subdomain
        try:
            xdom = subdomain[xname]
        except KeyError:
            pass
        try:
            ydom = subdomain[yname]
        except KeyError:
            pass
    assert all(isfinite(xdom)), "Must specify a finite domain for x direction"
    assert all(isfinite(ydom)), "Must specify a finite domain for y direction"
    w = xdom[1]-xdom[0]
    h = ydom[1]-ydom[0]

    # hold all non-phase-plane variables at their initial conditions
    xdict = gen.initialconditions.copy()

    xix = gen.funcspec.vars.index(xname)
    yix = gen.funcspec.vars.index(yname)

    xs = np.linspace(xdom[0], xdom[1], N)
    ys = np.linspace(ydom[0], ydom[1], N)

    X, Y = np.meshgrid(xs, ys)
    # storage for the vector-field components at each grid point
    # (previously allocated via a second meshgrid call, which obscured
    # that the contents are fully overwritten below)
    dxs = np.empty_like(X)
    dys = np.empty_like(Y)

    # largest vector magnitude seen, used to normalize arrow scaling
    dz_big = 0
    for xi, x in enumerate(xs):
        for yi, y in enumerate(ys):
            xdict.update({xname: x, yname: y})
            dx, dy = gen.Rhs(0, xdict)[[xix, yix]]
            # note order of indices (row index = y, column index = x)
            dxs[yi,xi] = dx
            dys[yi,xi] = dy
            dz = np.linalg.norm((dx,dy))
            if dz > dz_big:
                dz_big = dz

    # use this module's explicit pyplot alias `pp` rather than the
    # star-imported `plt` name, for consistency with get_PP
    pp.quiver(X, Y, dxs, dys, angles='xy', pivot='middle', units='inches',
              scale=dz_big*max(h,w)/(10*np.exp(2*scale_exp)),
              lw=0.01/np.exp(scale_exp-1),
              headwidth=max(2,1.5/(np.exp(scale_exp-1))),
              #headlength=2*max(2,1.5/(np.exp(scale_exp-1))),
              width=0.001*max(h,w), minshaft=2, minlength=0.001)

    ax = pp.gca()
    ax.set_xlim(xdom)
    ax.set_ylim(ydom)
    pp.draw()
3905 3906
def get_PP(gen, pt, vars, doms=None, doplot=True,
           t=0, saveplot=None, format='svg', trail_pts=None,
           null_style='-', orbit_col='g', orbit_style='-'):
    """Get a phase plane for generator gen at point pt.
    Specify t if the system is non-autonomous.
    If trail_pts are given (e.g., via show_PPs) then these are added
    behind the point pt.

    gen       : Generator defining the vector field
    pt        : current state Point (marked with a filled circle)
    vars      : pair [x, y] of variable names spanning the plane
    doms      : optional {var: [lo, hi]} axis limits (default gen.xdomain)
    doplot    : draw fixed points, nullclines and orbit if True
    saveplot  : optional filename root; figure saved and cleared if given
    format    : image format used when saving (default 'svg')
    null_style / orbit_col : plot style controls
    NOTE(review): orbit_style is currently unused in the body.

    Returns (fps, nulls_x, nulls_y): fixed points found in the domain and
    the two nullcline objects.

    Requires matplotlib
    """
    # translate between user-facing names and FuncSpec-compatible names
    xFS, yFS = gen._FScompatibleNames(vars)
    x, y = gen._FScompatibleNamesInv(vars)
    pt = gen._FScompatibleNamesInv(pt)
    ptFS = gen._FScompatibleNames(pt)

    # hold all remaining variables fixed at their values in pt
    sub_dom = filteredDict(dict(ptFS),
                           remain(gen.funcspec.vars, [x,y]))
    if doms is None:
        doms = gen.xdomain
    else:
        doms = gen._FScompatibleNames(doms)
    sub_dom[xFS] = doms[xFS]
    sub_dom[yFS] = doms[yFS]
    x_dom = doms[xFS]
    y_dom = doms[yFS]
    # intervals used below to discard fixed points outside the view
    x_interval = Interval(xFS, float, x_dom, abseps=0)
    y_interval = Interval(yFS, float, y_dom, abseps=0)
    fps = find_fixedpoints(gen, n=6, subdomain=sub_dom,
                           t=t, eps=1e-8)

    f = figure(1)
    nulls_x, nulls_y, handles = find_nullclines(gen, xFS, yFS,
                                    x_dom=x_dom, y_dom=y_dom,
                                    fixed_vars=ptFS, n=3, t=t,
                                    max_step={xFS: 0.1, yFS: 1},
                                    max_num_points=10000, fps=fps,
                                    doplot=doplot, plot_style=null_style,
                                    newfig=False)
    if doplot:
        # pad axis limits by 1% so markers at the boundary stay visible
        tol = 0.01
        xwidth = abs(x_dom[1]-x_dom[0])
        ywidth = abs(y_dom[1]-y_dom[0])
        x_lims = [x_dom[0]-tol*xwidth, x_dom[1]+tol*xwidth]
        y_lims = [y_dom[0]-tol*ywidth, y_dom[1]+tol*ywidth]
        if trail_pts is not None:
            pp.plot(trail_pts[x], trail_pts[y], orbit_col)
        pp.plot(pt[x], pt[y], orbit_col+'o')
        for fp in fps:
            if fp[x] in x_interval and fp[y] in y_interval:
                pp.plot(fp[x], fp[y], 'ko')
        ## the following commands can introduce artifact lines!!
        ax = pp.gca()
        ax.set_xlim(x_lims)
        ax.set_ylim(y_lims)
        draw()
        ##
    if saveplot is not None:
        # append the extension if the caller omitted it
        if saveplot[-4:] != '.' + format:
            saveplot = saveplot + '.' + format
        f.savefig(saveplot, format=format)
        f.clf()
    return fps, nulls_x, nulls_y
3969 3970
def show_PPs(gen, x, y, traj, t_start, t_step, t_end, moviename=None, doms=None,
             show_trail=False, add_traj=None):
    """Show phase plane x vs y for Generator gen over trajectory traj,
    over times t_start:t_step:t_end.
    Option to create a movie, provided ffmpeg if installed.
    Optional domain dictionary for variables can be provided, otherwise their
    extents in the given trajectory will be used.
    Option to show trail behind green orbit (default False).
    Option to add a comparison trajectory / nullclines to the plot.

    Requires matplotlib and ffmpeg
    """
    t = t_start
    pt0 = traj(t_start)
    pt1 = traj(t_end)
    xs = [pt0[x], pt1[x]]
    ys = [pt0[y], pt1[y]]
    if doms is None:
        # default extents are taken from the trajectory's two endpoints only
        # NOTE(review): intermediate points of traj may leave this box
        doms = {x: [min(xs),max(xs)], y: [min(ys),max(ys)]}
    if moviename is not None:
        # frames are written into a subdirectory named after the movie
        os.system('mkdir %s' % moviename)
        os.chdir(moviename)
    i = 0
    while t <= t_end:
        pt = traj(t)
        i += 1
        print "Frame %i, t = %.4f: step %.4f until %.4f" % (i, t, t_step, t_end)
        if moviename is not None:
            saveplot = 'pp_fig_%03d' % i
            format = 'png'
        else:
            saveplot = None
            format = 'svg'
        if show_trail and t > t_start:
            trail_pts = traj.sample(tlo=t_start, thi=t)
            if add_traj is not None:
                trail_pts2 = add_traj.sample(tlo=t_start, thi=t)
        else:
            trail_pts = None
            trail_pts2 = None
        if add_traj is None:
            get_PP(gen, pt, [x, y], doms=doms,
                   t=t, doplot=True, saveplot=saveplot, format=format,
                   trail_pts=trail_pts)
        else:
            # primary orbit in green with solid nullclines; comparison orbit
            # in cyan with dotted nullclines; only the second call saves
            get_PP(gen, pt, [x, y], doms=doms,
                   t=t, doplot=True, saveplot=None,
                   trail_pts=trail_pts, null_style='-', orbit_col='g', orbit_style='-')
            get_PP(gen, add_traj(t), [x, y], doms=doms,
                   t=t, doplot=True, saveplot=saveplot, format=format,
                   trail_pts=trail_pts2, null_style=':', orbit_col='c', orbit_style='-')
        t += t_step
    if moviename is not None:
        # copy the final frame a few times to pad out end of video
        for j in range(i+1, i+51):
            os.system('cp pp_fig_%03d.png pp_fig_%03d.png' % (i, j))
        os.system('ffmpeg -f image2 -i pp_fig_%03d.png -r 14 pp_movie.avi')
        os.chdir('..')
4029 4030 4031 # ----------------------------------------------------------- 4032 4033 # EXPERIMENTAL CLASS
4034 -class mesh_patch_2D(object):
4035 """2D mesh patch generator (for 4 or 8 points of a fixed distance 4036 from a central point). i.e., total number of mesh points may be 5 4037 or 9. 4038 4039 Once either 5 or 9 points have been selected, this is fixed for 4040 the mesh patch instance. The central point p0 and radii r of the 4041 mesh points are mutable using the 'update' method. If 4042 shape='circle' (default), mesh points are equidistant from p0. If 4043 shape='square', mesh points are distributed around the corners and 4044 mid-sides of a square of side length 2*r. 4045 4046 The points are in the order p0 followed by points in 'clockwise' 4047 order in the phase plane defined by the two variable names in p0. 4048 4049 Functions / callable objects can be applied to all points in the 4050 mesh patch by calling the mesh patch's 'eval' method with the 4051 function. It returns an a dictionary of mesh points -> function 4052 return values. 4053 4054 The direction of minimum (interpolated) gradient of a functional 4055 over the mesh can be determined from the 'min_gradient_dir' method. 4056 A similar method 'max_gradient_dir' also exists. 4057 4058 Highly experimental class! 4059 4060 Rob Clewley, Jan 2007 4061 """ 4062 _s2 = sqrt(2)/2 4063 _mesh5 = [(0,0), (0,1), (1,0), (0,-1), (-1,0)] 4064 _mesh9s = [(0,0), (0,1), (1,1), (1,0), (1,-1), 4065 (0,-1), (-1,-1), (-1,0), (-1, 1)] 4066 _mesh9c = [(0,0), (0,1), (_s2,_s2), (1,0), (_s2,-_s2), 4067 (0,-1), (-_s2,-_s2), (-1,0), (-_s2,_s2)] 4068
4069 - def __init__(self, p0, r, n=5, shape='circle'):
4070 """p0 should be of type Point, r must be either a scalar or a Point 4071 giving weighted radius in each of two directions, n must be 5 or 9. 4072 """ 4073 self.p0 = p0 4074 try: 4075 if len(r) == 2: 4076 assert r.coordnames == p0.coordnames, "Coordinate names of "\ 4077 "r and p0 must match" 4078 except TypeError: 4079 # r is a scalar 4080 pass 4081 self.r = r 4082 self.n = n 4083 self.shape = shape 4084 self.make_mesh(makelocalmesh=True) 4085 self.derive_angles()
4086
4087 - def make_mesh(self, makelocalmesh=False):
4088 if self.n==5: 4089 mesh = [self.r*array(p, 'd') for p in self._mesh5] 4090 self.shape = 'circle' # force it regardless of its value 4091 elif self.n==9: 4092 if self.shape=='circle': 4093 m = self._mesh9c 4094 elif self.shape=='square': 4095 m = self._mesh9s 4096 else: 4097 raise ValueError("Shape argument must be 'square' or 'circle'") 4098 mesh = [self.r*array(p, 'd') for p in m] 4099 else: 4100 raise ValueError("Only n=5 or 9 meshes are supported") 4101 # self.mesh is the absolute coords of the mesh 4102 try: 4103 self.mesh = pointsToPointset([self.p0+p for p in mesh]) 4104 except AssertionError: 4105 raise TypeError("Central point must be given by a 2D Point object") 4106 if makelocalmesh: 4107 # Only needs to be done once 4108 # Make (0,0) as a Point with the correct coordnames, for convenience 4109 zp = self.p0*0 4110 # self.mesh_local is the mesh in local coordinates (relative to p0) 4111 self.mesh_local = pointsToPointset([zp+p for p in mesh])
4112
4113 - def update(self, p0=None, r=None):
4114 """Change the central point p0 or patch radius r 4115 """ 4116 do_update = False 4117 if p0 is not None: 4118 self.p0 = p0 4119 do_update = True 4120 if r is not None: 4121 self.r = r 4122 do_update = True 4123 if do_update: 4124 self.make_mesh()
4125
4126 - def derive_angles(self):
4127 # angles are always measured in the positive direction here 4128 # angles don't include the centre point at (0,0), and are measured 4129 # from the "x" axis 4130 if self.n == 5: 4131 self._angles = [pi/2, 0, 3*pi/2, pi] 4132 else: 4133 # only mesh-points not aligned with axes need to be calculated 4134 # the others are constant regardless of re-scaling of axes 4135 vars = self.p0.coordnames 4136 self._angles = [] 4137 for p in self.mesh_local[1:]: 4138 a = atan2(p[vars[1]],p[vars[0]]) 4139 if a < 0: 4140 a += 2*pi 4141 self._angles.append(a)
4142 4143
4144 - def __getitem__(self, ix):
4145 return self.mesh[ix]
4146
4147 - def eval(self, f):
4148 """Evaluate the function or callable object f at the mesh patch points. 4149 Returns a dictionary of 4150 mesh point index -> function value 4151 Indices are always in the same order as the Pointset attribute 'mesh' 4152 """ 4153 res = {} 4154 for i, p in enumerate(self.mesh): 4155 try: 4156 res[i] = f(p) 4157 except: 4158 print "Problem evaluating supplied function on mesh patch at " \ 4159 "point ", p 4160 raise 4161 return res
4162
4163 - def setup_grad(self, valdict, orient, dir):
4164 try: 4165 vals = [valdict[i] for i in range(self.n)] 4166 except KeyError: 4167 raise ValueError("vals must have same length as number " 4168 "of mesh points") 4169 if orient is not None and dir is not None: 4170 ixs = [] 4171 try: 4172 vars = orient.coordnames 4173 except AttributeError: 4174 raise TypeError("orient must be a vector (Point object)") 4175 if dir not in [-1,1]: 4176 raise ValueError("dir value must be 1 or -1") 4177 x = dir*orient[vars[0]] 4178 y = dir*orient[vars[1]] 4179 # angle of directed orientation 4180 theta = atan2(y,x) 4181 if theta < 0: 4182 theta += 2*pi 4183 # filter out mesh points whose defining angles are outside of 4184 # the half-plane specified by the normal vector 4185 twopi = 2*pi 4186 for i, a in enumerate(self._angles): 4187 test = abs(a - theta) 4188 if test <= pi/2 or twopi-test <= pi/2: 4189 # append 1+i to correspond to 1..n mesh points 4190 ixs.append(i+1) 4191 ## print "Theta was ", theta 4192 ## print "Angles: ", self._angles 4193 ## print "Min grad using ixs", ixs 4194 use_vals = [vals[i] for i in ixs] 4195 else: 4196 ixs = range(1,self.n) 4197 use_vals = vals 4198 return vals, ixs, use_vals
4199
4200 - def min_gradient_dir(self, valdict, orient=None, dir=None):
4201 """Find the direction of any local minima of a functional over 4202 the mesh patch, whose dictionary of 4203 mesh point index -> function value 4204 is given by valdict. The magnitudes of these values are 4205 compared. The direction is indicated by a Point object, a 4206 relative position at the patch's characteristic radius from 4207 p0. 4208 4209 If the functional is constant everywhere on the patch (degenerate) 4210 then a random direction is returned. If the functional is constant 4211 everywhere except at the centre, then the vector (0,0) is returned 4212 if the centre has lower value; a random vector is returned 4213 otherwise. 4214 4215 If the functional is partially degenerate on some subset of 4216 mesh points then the optional orient and dir arguments can 4217 provide a means to select, provided the mesh resolution is 4218 sufficiently high. 4219 4220 orient is a vector (Point object) relative to p0 orienting a 4221 "positive" direction, such that the integer dir = -1 or +1 4222 selects one half of the mesh points on which to conduct the 4223 search. 4224 """ 4225 return self.__min_grad(valdict, orient, dir, 1)
4226
4227 - def min_gradient_mesh(self, valdict, orient=None, dir=None):
4228 """Find the direction of any local minima of a functional over 4229 the mesh patch, whose dictionary of 4230 mesh point index -> function value 4231 is given by valdict. The magnitudes of these values are 4232 compared. The direction is returned as a pair containing the two 4233 closest mesh points to the direction (in mesh local coords). 4234 4235 If the functional is constant everywhere on the patch (degenerate) 4236 then a random direction is returned. If the functional is constant 4237 everywhere except at the centre, then the vector (0,0) is returned 4238 if the centre has lower value; a random vector is returned 4239 otherwise. 4240 4241 If the functional is partially degenerate on some subset of 4242 mesh points then the optional orient and dir arguments can 4243 provide a means to select, provided the mesh resolution is 4244 sufficiently high. 4245 4246 orient is a vector (Point object) relative to p0 orienting a 4247 "positive" direction, such that the integer dir = -1 or +1 4248 selects one half of the mesh points on which to conduct the 4249 search. 4250 """ 4251 return self.__min_grad(valdict, orient, dir, 0)
4252
4253 - def __min_grad(self, valdict, orient, dir, return_type):
4254 vals, ixs, use_vals = self.setup_grad(valdict, orient, dir) 4255 degen_everywhere = alltrue([v==vals[0] for v in vals]) 4256 degen_exterior = alltrue([v==vals[ixs[0]] for v in use_vals]) 4257 degen_exterior_lower = degen_exterior and vals[0] >= vals[ixs[0]] 4258 degen_exterior_higher = degen_exterior and vals[0] < vals[ixs[0]] 4259 if degen_everywhere or degen_exterior_lower: 4260 # degenerate case -- return random vector 4261 x = uniform(-1,1) 4262 y = uniform(-1,1) 4263 vl = sqrt(x*x + y*y) 4264 x = x/vl 4265 y = y/vl 4266 if return_type == 0: 4267 raise RuntimeError("No valid directions to point in") 4268 else: 4269 return Point(dict(zip(self.p0.coordnames, [x,y]))) 4270 elif degen_exterior_higher: 4271 if return_type == 0: 4272 raise RuntimeError("No valid directions to point in") 4273 else: 4274 return Point(dict(zip(self.p0.coordnames, [0,0]))) 4275 d1 = (Inf, NaN) 4276 d2 = (Inf, NaN) 4277 for i, v in enumerate(use_vals): 4278 if ixs[i] == 0: 4279 # don't include centre point 4280 continue 4281 if abs(v) < d1[0]: 4282 d2 = d1 4283 d1 = (abs(v), ixs[i]) 4284 elif abs(v) < d2[0]: 4285 d2 = (abs(v), ixs[i]) 4286 # the two best mesh patch points (unless partially degenerate) 4287 pt1 = self.mesh_local[d1[1]] 4288 pt2 = self.mesh_local[d2[1]] 4289 if return_type == 0: 4290 return (pt1, pt2) 4291 else: 4292 score1 = d1[0] 4293 score2 = d2[0] 4294 if score1 + score2 == 0: 4295 interp_pt = pt1+pt2 4296 return self.r*interp_pt/norm(interp_pt) 4297 elif score1 == 0: 4298 return pt1 4299 elif score2 == 0: 4300 return pt2 4301 else: 4302 interp_pt = pt1/score1+pt2/score2 4303 return self.r*interp_pt/norm(interp_pt)
4304 4305
4306 - def max_gradient_dir(self, valdict, orient=None, dir=None):
4307 """Find the direction of maximum gradient of a functional over 4308 the mesh patch, whose dictionary of 4309 mesh point index -> function value 4310 is given by valdict. The magnitudes of the values are 4311 compared. The direction is indicated by a Point object, a 4312 relative position at the patch's characteristic radius from 4313 p0. 4314 4315 If the functional is constant everywhere on the patch (degenerate) 4316 then a random direction is returned. If the functional is constant 4317 everywhere except at the centre, then the vector (0,0) is returned 4318 if the centre has higher value; a random vector is returned 4319 otherwise. 4320 4321 If the functional is partially degenerate on some subset of 4322 mesh points then the optional orient and dir arguments can 4323 provide a means to select, provided the mesh resolution is 4324 sufficiently high. 4325 4326 orient is a vector (Point object) relative to p0 orienting a 4327 "positive" direction, such that the integer dir = -1 or +1 4328 selects one half of the mesh points on which to conduct the 4329 search. 4330 """ 4331 return self.__max_grad(valdict, orient, dir, 1)
4332
4333 - def max_gradient_mesh(self, valdict, orient=None, dir=None):
4334 """Find the direction of maximum gradient of a functional over 4335 the mesh patch, whose dictionary of 4336 mesh point index -> function value 4337 is given by valdict. The magnitudes of the values are 4338 compared. The direction is returned as a pair containing the two 4339 closest mesh points to the direction (in mesh local coords). 4340 4341 If the functional is constant everywhere on the patch (degenerate) 4342 then a random direction is returned. If the functional is constant 4343 everywhere except at the centre, then the vector (0,0) is returned 4344 if the centre has higher value; a random vector is returned 4345 otherwise. 4346 4347 If the functional is partially degenerate on some subset of 4348 mesh points then the optional orient and dir arguments can 4349 provide a means to select, provided the mesh resolution is 4350 sufficiently high. 4351 4352 orient is a vector (Point object) relative to p0 orienting a 4353 "positive" direction, such that the integer dir = -1 or +1 4354 selects one half of the mesh points on which to conduct the 4355 search. 4356 """ 4357 return self.__max_grad(valdict, orient, dir, 0)
4358
4359 - def __max_grad(self, valdict, orient, dir, return_type):
4360 vals, ixs, use_vals = self.setup_grad(valdict, orient, dir) 4361 degen_everywhere = alltrue([v==vals[0] for v in vals]) 4362 degen_exterior = alltrue([v==vals[1] for v in use_vals]) 4363 degen_exterior_lower = degen_exterior and vals[0] >= vals[ixs[0]] 4364 degen_exterior_higher = degen_exterior and vals[0] < vals[ixs[0]] 4365 if degen_everywhere or degen_exterior_higher: 4366 # degenerate case -- return random vector 4367 x = uniform(-1,1) 4368 y = uniform(-1,1) 4369 vl = sqrt(x*x + y*y) 4370 x = x/vl 4371 y = y/vl 4372 if return_type == 0: 4373 raise RuntimeError("No valid directions to point in") 4374 else: 4375 return Point(dict(zip(self.p0.coordnames, [x,y]))) 4376 elif degen_exterior_lower: 4377 if return_type == 0: 4378 raise RuntimeError("No valid directions to point in") 4379 else: 4380 return Point(dict(zip(self.p0.coordnames, [0,0]))) 4381 d1 = (0, NaN) 4382 d2 = (0, NaN) 4383 for i, v in enumerate(use_vals): 4384 if ixs[i] == 0: 4385 # don't include centre point 4386 continue 4387 if abs(v) > d1[0]: 4388 d2 = d1 4389 d1 = (abs(v), ixs[i]) 4390 elif abs(v) > d2[0]: 4391 d2 = (abs(v), ixs[i]) 4392 # the two best mesh patch points (unless partially degenerate) 4393 pt1 = self.mesh_local[d1[1]] 4394 pt2 = self.mesh_local[d2[1]] 4395 if return_type == 0: 4396 return (pt1, pt2) 4397 else: 4398 score1 = d1[0] 4399 score2 = d2[0] 4400 interp_pt = pt1*score1+pt2*score2 4401 return self.r*interp_pt/norm(interp_pt)
4402
4403 - def __repr__(self):
4404 return "Mesh patch [" + ", ".join(["(" + \ 4405 str(m).replace(' ','').replace('\n',', ')+")" \ 4406 for m in self.mesh]) +"]"
4407 4408 __str__ = __repr__


# -----------------------------------------------------------------------------

## PRIVATE CLASSES, FUNCTIONS, and CONSTANTS

# For use when operations don't allow symbol Inf
# (e.g. returned as a sentinel "distance" by the test functions below
# when integration from a trial initial condition fails)
_num_inf = 1.0e100

def create_test_fn(gen, tmax, dist_pts):
    """Create integration test function using the supplied generator.

    gen      : Generator integrated from the supplied initial condition
    tmax     : end time for each test integration (tdata = [0, tmax])
    dist_pts : distance-measuring object over a reference pointset
               (exposes all_pts and callable min-distance queries)

    The returned function maps an initial condition to a pair
    (endpoint - closest reference point, index of that reference point),
    or (_num_inf, NaN) if the integration breaks down.
    NOTE(review): the original docstring described a dictionary return of
    the two closest points, but the code returns the tuple above -- confirm
    against callers.
    """

    gen.set(tdata=[0,tmax])

    def Tn_integ(ic):
        # integrate from ic and compare the trajectory endpoint against
        # the reference pointset
        gen.set(ics=ic)
        try:
            test_traj = gen.compute('test')
        except:
            print "Problem integrating test trajectory at i.c. ", ic
            raise
        test_pts = test_traj.sample(coords=dist_pts.all_pts.coordnames)
        # distance of endpoint to pointset
        try:
            d_info = dist_pts(test_pts[-1], use_norm=True, minmax=['min'])
        except ValueError:
            # This error happens when fsolve tries initial conditions that
            # break the integrator
            return (_num_inf,NaN)
        pos = d_info['min'][1]['pos']
        return (test_pts[-1]-dist_pts.all_pts[pos], pos)
    return Tn_integ
def create_test_fn_with_events(gen, tmax, dist_pts, iso_ev, other_evnames, pars_to_vars):
    """Create integration test function using the supplied generator,
    assuming it contains isochron-related events.

    gen           -- Generator with isochron-related events
    tmax          -- maximum integration time
    dist_pts      -- distance-to-pointset helper exposing all_pts
    iso_ev        -- name of the terminal isochron event
    other_evnames -- event names to deactivate during refinement
    pars_to_vars  -- map of generator parameter names to coord names of
                     the query point

    Returns a function Tn_integ(ic) that integrates from initial
    condition ic and returns the triple
        (endpoint - refined event point, event time, event point),
    where the coarse closest point is refined by _find_min_pt using the
    isochron event; or (_num_inf, NaN) if the distance computation fails.
    """
    def Tn_integ(ic):
        gen.set(ics=ic, tdata=[0, tmax])
        try:
            test_traj = gen.compute('test')
        except:
            # report which i.c. broke the integrator, then re-raise
            # (single-string print() form is valid in both Python 2 and 3)
            print("Problem integrating test trajectory at i.c. %s" % (ic,))
            raise
        test_pts = test_traj.sample(coords=dist_pts.all_pts.coordnames)
        # distance of endpoint to pointset
        try:
            d_info = dist_pts(test_pts[-1], use_norm=True, minmax=['min'])
        except ValueError:
            # This error happens when fsolve tries initial conditions that
            # break the integrator
            return (_num_inf, NaN)
        # refine min position using isochron-related events
        q = test_pts[-1]
        perp_ev, t_ev = _find_min_pt(gen, q,
                                     d_info['min'][1]['pos'], dist_pts.all_pts,
                                     pars_to_vars, iso_ev, other_evnames)
        ev_pt = perp_ev[0][q.coordnames]
        # different return format to version w/o events
        return (test_pts[-1] - ev_pt, t_ev, ev_pt)
    return Tn_integ
def _xinf_ND(xdot, x0, args=(), xddot=None, xtol=1.49012e-8):
    """Private function for wrapping the fsolving for x_infinity
    for a variable x in N dimensions.

    xdot  -- RHS function passed to fsolve
    x0    -- initial guess (sequence)
    args  -- extra arguments forwarded to xdot
    xddot -- optional Jacobian (fprime) for fsolve
    xtol  -- solver tolerance

    Returns the root as a float, or NaN if the solver errors out or does
    not report success (fsolve ier codes 1, 2, 3 accepted).
    """
    try:
        result = fsolve(xdot, x0, args, fprime=xddot, xtol=xtol, full_output=1)
    except (ValueError, TypeError, OverflowError):
        xinf_val = NaN
    except Exception:
        # was a bare except: narrowed so KeyboardInterrupt/SystemExit
        # still propagate; other failures are reported and mapped to NaN
        # (single-string print() form is valid in both Python 2 and 3)
        print("Error in fsolve: %s %s" % (sys.exc_info()[0], sys.exc_info()[1]))
        xinf_val = NaN
    else:
        if result[2] in (1, 2, 3):  # ,4,5):
            # 4,5 means "not making good progress" (see fsolve docstring)
            xinf_val = float(result[0])
        else:
            xinf_val = NaN
    return xinf_val
def _xinf_1D(xdot, x0, args=(), xddot=None, xtol=1.49012e-8):
    """Private function for wrapping the solving for x_infinity
    for a variable x in 1 dimension"""
    try:
        if xddot is None:
            # no derivative supplied: fall back to fsolve
            root = fsolve(xdot, x0, args, xtol=xtol)
        else:
            # derivative available: use Newton's method
            root = newton_meth(xdot, x0, fprime=xddot, args=args)
        return float(root)
    except RuntimeError:
        # solver failed to converge
        return NaN
def _find_min_pt(gen, q, pos1, pts, pars_to_vars, iso_ev, other_evnames,
                 offset=4):
    """Refine the closest point on orbit pts to query point q using the
    terminal isochron event iso_ev.

    gen           -- Generator supporting the isochron events
    q             -- query point (indexable by coord name)
    pos1          -- index into pts of the coarse closest point
    pts           -- pointset of the (assumed periodic) orbit with a 't' coord
    pars_to_vars  -- map of generator parameter names to q's coord names
    iso_ev        -- terminal isochron event name (activated for the search)
    other_evnames -- event names deactivated during the search
    offset        -- number of mesh points to back up before integrating

    Returns (event point(s) for iso_ev, event time).
    Raises ValueError if no event was found.
    """
    ts = pts['t']
    if pos1 > offset:
        t_pos1n = ts[pos1-offset]
        p = pts[pos1-offset]
    else:
        # near the start of the pointset: extrapolate the start time
        dt = ts[pos1+1] - ts[pos1]
        t_pos1n = ts[pos1] - offset*dt
        p = pts[pos1-offset]  # negative index OK b/c assume periodic orbit
    if pos1 < len(pts)-offset:
        t_pos1p = ts[pos1+offset]
    else:
        # near the end of the pointset: extrapolate the end time
        dt = ts[pos1] - ts[pos1-1]
        t_pos1p = ts[pos1] + offset*dt
    # small pad so the integration window covers the far endpoint
    tdata_local = [0, t_pos1p-t_pos1n + 1e-5]
    # search with only the isochron event active
    gen.eventstruct.setActiveFlag(iso_ev, True)
    for evn in other_evnames:
        gen.eventstruct.setActiveFlag(evn, False)
    # mirror q's coordinates into the generator's parameters
    qpars = {}
    for parname, varname in pars_to_vars.items():
        qpars[parname] = q[varname]
    gen.set(ics=p, tdata=tdata_local, pars=qpars)
    # computed only for the side effect of detecting the terminal event
    gen.compute('test2')
    gen.eventstruct.setActiveFlag(iso_ev, False)
    # assert presence of terminal event
    perp_ev = gen.getEvents()[iso_ev]
    try:
        assert len(perp_ev) == 1
        t_ev = perp_ev['t'][0] + tdata_local[0]
    except Exception:
        # was a bare except; report diagnostics then signal failure
        # (single-string print() form is valid in both Python 2 and 3)
        print("Event name: %s" % iso_ev)
        print(perp_ev)
        raise ValueError("No nearest point found with event")
    return (perp_ev, t_ev)
class base_n_counter(object):
    """A d-digit counter in base n.

    Incrementing past the maximum state (n-1, n-1, ..., n-1) wraps the
    counter back to all zeros.

    base_n_counter(n, d)

    Public attribute:
        counter -- array of the d current digit values
                   (least-significant digit first)

    Implements methods:
        inc -- advance the counter by one (returns nothing)
        __getitem__ -- read the ith digit of the counter
        reset -- return the counter to (0, 0, ..., 0)
    """
    def __init__(self, n, d):
        # largest value any single digit may hold
        self._maxval = n-1
        self._d = d
        self.counter = np.zeros((d,))

    def inc(self):
        """Advance the counter by one, wrapping to zeros on overflow."""
        for pos in range(self._d):
            if self.counter[pos] < self._maxval:
                self.counter[pos] += 1
                return
            # this digit overflows: zero it and carry into the next
            self.counter[pos] = 0
        # carried past the most significant digit -- wrap to all zeros
        self.counter = np.zeros((self._d,))

    def __getitem__(self, i):
        """Return the ith digit of the counter."""
        try:
            return self.counter[i]
        except IndexError:
            raise IndexError("Invalid index for counter")

    def reset(self):
        """Set every digit back to zero."""
        self.counter = np.zeros((self._d,))

    def __str__(self):
        return str(self.counter.tolist())

    __repr__ = __str__
4613