1 """
2 Parser utilities.
3
4 Robert Clewley, September 2005.
5
6 Includes AST code by Pearu Peterson and Ryan Gutenkunst,
7 modified by R. Clewley.
8 """
9
10
11 from __future__ import division
12 from errors import *
13 from common import *
14 import re
15 import math, random
16 from numpy import alltrue, sometrue
17 import numpy as np
18 from copy import copy, deepcopy
19 import parser, symbol, token


# If DO_POW is True, power expressions are rendered in the functional
# syntax 'pow(x,y)'; otherwise the '**' operator syntax is used
# (see POW_STR below)
DO_POW = True

# If DO_DEC is True, integer constants are given decimal forms where
# appropriate (see ZEROS, ONES, TENS and trysimple below)
DO_DEC = True


protected_scipynames = ['sign', 'mod']
specialfns = ['airy', 'airye', 'ai_zeros', 'bi_zeros', 'ellipj',
              'ellipk', 'ellipkinc', 'ellipe', 'ellipeinc', 'jn',
              'jv', 'jve', 'yn', 'yv', 'yve', 'kn', 'kv', 'kve',
              'iv', 'ive', 'hankel1', 'hankel1e', 'hankel2',
              'hankel2e', 'lmbda', 'jnjnp_zeros', 'jnyn_zeros',
              'jn_zeros', 'jnp_zeros', 'yn_zeros', 'ynp_zeros',
              'y0_zeros', 'y1_zeros', 'y1p_zeros', 'j0', 'j1',
              'y0', 'y1', 'i0', 'i0e', 'i1', 'i1e', 'k0', 'k0e',
              'k1', 'k1e', 'itj0y0', 'it2j0y0', 'iti0k0', 'it2i0k0',
              'besselpoly', 'jvp', 'yvp', 'kvp', 'ivp', 'h1vp',
              'h2vp', 'sph_jn', 'sph_yn', 'sph_jnyn', 'sph_in',
              'sph_kn', 'sph_inkn', 'riccati_jn', 'riccati_yn',
              'struve', 'modstruve', 'itstruve0', 'it2struve0',
              'itmodstruve0', 'bdtr', 'bdtrc', 'bdtri', 'btdtr',
              'btdtri', 'fdtr', 'fdtrc', 'fdtri', 'gdtr', 'gdtrc',
              'gdtria', 'nbdtr', 'nbdtrc', 'nbdtri', 'pdtr', 'pdtrc',
              'pdtri', 'stdtr', 'stdtridf', 'stdtrit', 'chdtr', 'chdtrc',
              'chdtri', 'ndtr', 'ndtri', 'smirnov', 'smirnovi',
              'kolmogorov', 'kolmogi', 'tklmbda', 'gamma',
              'gammaln', 'gammainc', 'gammaincinv', 'gammaincc',
              'gammainccinv', 'beta', 'betaln', 'betainc',
              'betaincinv', 'psi',
              'digamma', 'rgamma', 'polygamma', 'erf', 'erfc',
              'erfinv', 'erfcinv', 'erf_zeros', 'fresnel',
              'fresnel_zeros', 'fresnelc_zeros', 'fresnels_zeros',
              'modfresnelp', 'modfresnelm', 'lpn', 'lqn', 'lpmn',
              'lqmn', 'lpmv', 'sph_harm', 'legendre', 'chebyt',
              'chebyu', 'chebyc', 'chebys', 'jacobi', 'laguerre',
              'genlaguerre', 'hermite', 'hermitenorm', 'gegenbauer',
              'sh_legendre', 'sh_chebyt', 'sh_chebyu', 'sh_jacobi',
              'hyp2f1', 'hyp1f1', 'hyperu', 'hyp0f1', 'hyp2f0',
              'hyp1f2', 'hyp3f0', 'pbdv', 'pbvv', 'pbwa', 'pbdv_seq',
              'pbvv_seq', 'pbdn_seq', 'mathieu_a', 'mathieu_b',
              'mathieu_even_coef', 'mathieu_odd_coef', 'mathieu_cem',
              'mathieu_sem', 'mathieu_modcem1', 'mathieu_modcem2',
              'mathieu_modsem1', 'mathieu_modsem2', 'pro_ang1',
              'pro_rad1', 'pro_rad2', 'obl_ang1', 'obl_rad1',
              'obl_rad2', 'pro_cv', 'obl_cv', 'pro_cv_seq',
              'obl_cv_seq', 'pro_ang1_cv', 'pro_rad1_cv',
              'pro_rad2_cv', 'obl_ang1_cv', 'obl_rad1_cv',
              'obl_rad2_cv', 'kelvin', 'kelvin_zeros', 'ber',
              'bei', 'berp', 'beip', 'ker', 'kei', 'kerp', 'keip',
              'ber_zeros', 'bei_zeros', 'berp_zeros', 'beip_zeros',
              'ker_zeros', 'kei_zeros', 'kerp_zeros', 'keip_zeros',
              'expn', 'exp1', 'expi', 'wofz', 'dawsn', 'shichi',
              'sici', 'spence', 'zeta', 'zetac', 'cbrt', 'exp10',
              'exp2', 'radian', 'cosdg', 'sindg', 'tandg', 'cotdg',
              'log1p', 'expm1', 'cosm1', 'round']
protected_specialfns = ['special_'+s for s in specialfns]
protected_mathnames = filter(lambda s: not s.startswith('__'),
                             dir(math))
protected_randomnames = filter(lambda s: not s.startswith('_'),
                               dir(random))


builtin_auxnames = ['globalindepvar', 'initcond', 'heav', 'if',
                    'getindex', 'getbound']

protected_macronames = ['for', 'if', 'max', 'min', 'sum']

reserved_keywords = ['and', 'not', 'or', 'del', 'for', 'if', 'is', 'raise',
                     'assert', 'elif', 'from', 'lambda', 'return', 'break',
                     'else', 'global', 'try', 'class', 'except', 'while',
                     'continue', 'exec', 'import', 'pass', 'yield', 'def',
                     'finally', 'in', 'print', 'as', 'None']

# same as reserved_keywords but with the boolean operators 'and', 'not'
# and 'or' omitted
convert_power_reserved_keywords = ['del', 'for', 'if', 'is', 'raise',
                     'assert', 'elif', 'from', 'lambda', 'return', 'break',
                     'else', 'global', 'try', 'class', 'except', 'while',
                     'continue', 'exec', 'import', 'pass', 'yield', 'def',
                     'finally', 'in', 'print', 'as', 'None']


protected_allnames = protected_mathnames + protected_scipynames \
                     + protected_specialfns + protected_randomnames \
                     + builtin_auxnames + protected_macronames \
                     + ['abs', 'pow', 'min', 'max', 'sum']


# number of arguments expected by the builtin auxiliary functions and macros
builtinFnSigInfo = {'globalindepvar': 1, 'initcond': 1, 'heav': 1, 'getindex': 1,
                    'if': 3, 'for': 4, 'getbound': 2, 'max': 1, 'min': 1}


_functions = ['readArgs', 'findEndBrace', 'makeParList', 'joinStrs',
              'parseMatrixStrToDictStr', 'joinAsStrs', 'replaceSep',
              'wrapArgInCall', 'addArgToCalls', 'findNumTailPos',
              'isToken', 'isNameToken', 'isNumericToken', 'count_sep',
              'isHierarchicalName', 'replaceSepInv', 'replaceSepListInv',
              'replaceSepList', 'convertPowers', 'mapNames',
              'replaceCallsWithDummies', 'isIntegerToken', 'proper_match',
              'remove_indices_from_range']

_objects = ['protected_auxnamesDB', 'protected_allnames', 'protected_macronames',
            'protected_mathnames', 'protected_randomnames', 'builtin_auxnames',
            'protected_scipynames', 'protected_specialfns', 'builtinFnSigInfo']

_classes = ['symbolMapClass', 'parserObject', 'auxfnDBclass']

_constants = ['name_chars_RE', 'num_chars', 'ZEROS', 'ONES', 'NAMESEP',
              '_indentstr']

_symbfuncs = ['simplify', 'simplify_str', 'ensurebare', 'ensureparen',
              'trysimple', 'ensuredecimalconst', 'doneg', 'dosub', 'doadd',
              'dodiv', 'domul', 'dopower', 'splitastLR', 'ast2string',
              'string2ast', 'sym2name', 'ast2shortlist', 'splitargs',
              'mapPowStr', 'toPowSyntax', 'ensureparen_div']

_symbconsts = ['syms']

__all__ = _functions + _classes + _objects + _constants + _symbfuncs + _symbconsts


name_chars_RE = re.compile(r'\w')
alphabet_chars_RE = re.compile(r'[a-zA-Z0-9]')
num_chars = [str(i) for i in range(10)]

if DO_POW:
    POW_STR = 'pow(%s,%s)'
else:
    POW_STR = '%s**%s'

if DO_DEC:
    ZEROS = ['0', '0.0', '0.', '(0)', '(0.0)', '(0.)']
    ONES = ['1', '1.0', '1.', '(1)', '(1.0)', '(1.)']
    TENS = ['10', '10.0', '10.', '(10)', '(10.0)', '(10.)']
else:
    ZEROS = ['0']
    ONES = ['1']
    TENS = ['10']

# separator character for hierarchical names
NAMESEP = '.'

# indentation unit used in generated code
_indentstr = " "

# name lookup for parse-tree symbol and token codes
syms = token.tok_name
for s in symbol.sym_name.keys():
    syms[s] = symbol.sym_name[s]


def mapPowStr(t, p):
    """Input an AST list t for an expression of the form 'pow(x,y)'.
    Outputs an expression of the form x**y, x^y, or pow(x,y), according
    to the target power syntax p, where x and y have also been processed
    to the target power syntax.
    Written by R. Clewley"""
    ll = splitargs(ast2string(t[2])[1:-1])
    if p == '**':
        lpart = dopower(ensureparen(ast2string(toDoubleStarSyntax(string2ast(ll[0]))), 1),
                        ensureparen(ast2string(toDoubleStarSyntax(string2ast(ll[1]))), 1),
                        '%s**%s')
        if len(t) > 3:
            res = ensureparen(ast2string(toDoubleStarSyntax(['power',
                                  string2ast('__LPART__')] + t[3:])))
            return res.replace('__LPART__', lpart)
        else:
            return ensureparen(lpart, 1)
    elif p == '^':
        lpart = dopower(ensureparen(ast2string(toCircumflexSyntax(string2ast(ll[0]))), 1),
                        ensureparen(ast2string(toCircumflexSyntax(string2ast(ll[1]))), 1),
                        '%s^%s')
        if len(t) > 3:
            res = ensureparen(ast2string(toCircumflexSyntax(['power',
                                  string2ast('__LPART__')] + t[3:])), 1)
            return res.replace('__LPART__', lpart)
        else:
            return ensureparen(lpart, 1)
    elif p == 'pow':
        lpart = dopower(ensurebare(ast2string(toPowSyntax(string2ast(ll[0])))),
                        ensurebare(ast2string(toPowSyntax(string2ast(ll[1])))),
                        'pow(%s,%s)')
        if len(t) > 3:
            res = ensurebare(ast2string(toPowSyntax(['power',
                                 string2ast('__LPART__')] + t[3:])))
            return res.replace('__LPART__', lpart)
        else:
            return ensureparen(lpart, 1)
    else:
        raise ValueError("Invalid power operator")


def toPowSyntax(t):
    """Convert power sub-expressions in AST t to functional pow(x,y)
    syntax."""
    if isinstance(t[0], str):
        if t[0] == 'power':
            try:
                if t[2][0] == 'DOUBLESTAR':
                    try:
                        return string2ast(dopower(ensurebare(ast2string(toPowSyntax(t[1]))),
                                                  ensurebare(ast2string(toPowSyntax(t[3]))),
                                                  'pow(%s,%s)'))
                    except IndexError:
                        # incomplete power sub-tree: leave unchanged
                        return t
                elif t[1][1] == 'pow':
                    return string2ast(ensureparen(mapPowStr(t, 'pow'), 1))
                elif len(t) > 3 and t[3][0] == 'DOUBLESTAR':
                    try:
                        return string2ast(dopower(ensurebare(ast2string(toPowSyntax(t[1:3]))),
                                                  ensurebare(ast2string(toPowSyntax(t[4]))),
                                                  'pow(%s,%s)'))
                    except IndexError:
                        # incomplete power sub-tree: leave unchanged
                        return t
            except:
                print t
                print ast2string(t)
                raise
        elif t[0] == 'xor_expr' and t[2][0] == 'CIRCUMFLEX':
            # here '^' is used as a power operator, not XOR:
            # rewrite as a 'power' node and re-process
            tc = copy(t)
            tc[0] = 'power'
            tc[2] = ['DOUBLESTAR', '**']
            return toPowSyntax(string2ast(ast2string(tc)))
    # recurse into any sub-lists
    o = []
    for i in t:
        if isinstance(i, list):
            if type(i[0]) == str and i[0].islower():
                o.append(toPowSyntax(i))
            else:
                o.append(i)
        else:
            o.append(i)
    return o


def splitargs(da, lbraces=['('], rbraces=[')']):
    """Split a comma-delimited argument string without being fooled by
    commas that occur inside nested function calls or braces.
    Written by Pearu Peterson. Adapted by Rob Clewley to accept different
    braces."""
    if alltrue([da.find(lbrace) < 0 for lbrace in lbraces]):
        return da.split(',')
    ll = []; o = ''; ii = 0
    for i in da:
        if i == ',' and ii == 0:
            # comma at top level: end of an argument
            ll.append(o)
            o = ''
        else:
            if i in lbraces: ii = ii + 1
            if i in rbraces: ii = ii - 1
            o = o + i
    ll.append(o)
    return ll
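
# Usage sketch for splitargs (illustrative values):
#   splitargs('x,pow(y,z),2*w')             ->  ['x', 'pow(y,z)', '2*w']
#   splitargs('[a,b],[c,d]', ['['], [']'])  ->  ['[a,b]', '[c,d]']
# The running brace-depth count ii protects commas nested inside braces.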


def ast2shortlist(t):
    """Compress an AST into a short nested-list form, dropping empty and
    single-child chain nodes."""
    if type(t) is parser.ASTType: return ast2shortlist(t.tolist())
    if not isinstance(t, list): return t
    if t[1] == '': return None
    if not isinstance(t[1], list): return t
    if len(t) == 2 and isinstance(t[1], list):
        return ast2shortlist(t[1])
    o = []
    for tt in map(ast2shortlist, t[1:]):
        if tt is not None:
            o.append(tt)
    if len(o) == 1: return o[0]
    return [t[0]] + o

def sym2name(t):
    """Replace numeric symbol/token codes in an AST with their names."""
    if type(t) is parser.ASTType: return sym2name(t.tolist())
    if not isinstance(t, list): return t
    return [syms[t[0]]] + map(sym2name, t[1:])


def ast2string(t):
    """Flatten an AST back into its string form."""
    if type(t) is parser.ASTType: return ast2string(t.tolist())
    if not isinstance(t, list): return None
    if not isinstance(t[1], list): return t[1]
    o = ''
    for tt in map(ast2string, t):
        if isinstance(tt, str):
            o = o + tt
    return o

def splitastLR(t):
    """Split a binary-operator AST node into its left and right parts."""
    lft = t[1]
    rt = t[3:]
    if len(rt) > 1:
        rt = [t[0]] + rt
    else:
        rt = rt[0]
    return lft, rt


def domul(l, r):
    """Symbolic multiplication of string expressions l and r, with
    simplification of signs, zeros and ones."""
    if l in ZEROS or r in ZEROS: return '0'
    if l in ONES: return r
    if r in ONES: return l
    if l in ['-'+o for o in ONES]: return doneg(r)
    if r in ['-'+o for o in ONES]: return doneg(l)
    lft = string2ast(l)
    rt = string2ast(r)
    lft_neg = lft[0] == 'factor' and lft[1][0] == 'MINUS'
    rt_neg = rt[0] == 'factor' and rt[1][0] == 'MINUS'
    if lft_neg:
        new_l = l[1:]
    else:
        new_l = l
    if rt_neg:
        new_r = r[1:]
    else:
        new_r = r
    if lft_neg and rt_neg or not (lft_neg or rt_neg):
        # signs cancel, or both operands are positive
        return trysimple('%s*%s' % (ensureparen(new_l, ismul=1),
                                    ensureparen(new_r, ismul=1)))
    else:
        return trysimple('-%s*%s' % (ensureparen(new_l, ismul=1),
                                     ensureparen(new_r, ismul=1)))

def dodiv(l, r):
    """Symbolic division of string expressions l and r, with
    simplification of signs, zeros and ones."""
    if r in ZEROS: raise ValueError("Division by zero in expression")
    if l in ZEROS: return '0'
    if r in ONES: return l
    if r in ['-'+o for o in ONES]: return doneg(l)
    if r == l: return '1'
    lft = string2ast(l)
    rt = string2ast(r)
    lft_neg = lft[0] == 'factor' and lft[1][0] == 'MINUS'
    rt_neg = rt[0] == 'factor' and rt[1][0] == 'MINUS'
    if lft_neg:
        new_l = l[1:]
    else:
        new_l = l
    if rt_neg:
        new_r = r[1:]
    else:
        new_r = r
    if lft_neg and rt_neg or not (lft_neg or rt_neg):
        return trysimple('%s/%s' % (ensureparen(ensuredecimalconst(new_l), ismul=1, do_decimal=DO_DEC),
                                    ensureparen(ensuredecimalconst(new_r), 1, do_decimal=DO_DEC)),
                         do_decimal=DO_DEC)
    else:
        return trysimple('-%s/%s' % (ensureparen(ensuredecimalconst(new_l), ismul=1, do_decimal=DO_DEC),
                                     ensureparen(ensuredecimalconst(new_r), 1, do_decimal=DO_DEC)),
                         do_decimal=DO_DEC)
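
# Quick-check sketches for domul / dodiv (illustrative; these cases exercise
# only the early-exit simplification rules):
#   domul('1', 'y')  ->  'y'
#   domul('x', '0')  ->  '0'
#   dodiv('x', '1')  ->  'x'
#   dodiv('x', 'x')  ->  '1'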

def trysimple(t, do_decimal=False):
    """Attempt to evaluate string expression t as a numeric constant,
    returning the simplified string (or t unchanged on failure).
    If do_decimal is set, non-zero integral results keep a decimal point."""
    try:
        t_e = eval(t, {}, {})
        add_point = do_decimal and t_e != 0 and DO_DEC
        if type(t_e) == int and add_point:
            t = repr(t_e) + ".0"
        elif type(t_e) == float and int(t_e) == t_e:
            # integral value held as a float: keep the decimal point
            # only if requested
            if add_point:
                t = repr(t_e)
            else:
                t = repr(int(t_e))
        else:
            t = repr(t_e)
    except:
        pass
    return t


def ensureparen(t, flag=0, do_decimal=False, ismul=0):
    """Ensure string expression t is safely parenthesized for composition
    into a larger expression. If flag > 0, 'term' and certain power/xor
    expressions are also wrapped."""
    t = trysimple(t, do_decimal)
    tt = string2ast(t)
    if t[0] == '-':
        if tt[0] == 'factor':
            # a single negated factor needs no braces
            return t
        else:
            return '(%s)' % t
    if tt[0] == 'arith_expr':
        return '(%s)' % t
    if flag > 0:
        if tt[0] == 'term':
            return '(%s)' % t
        elif tt[0] == 'power':
            if tt[1] == ['NAME', 'pow']:
                return t
            else:
                # wrap if any operand is itself a sum or difference
                for x in tt[1:]:
                    if x[0] == 'arith_expr':
                        return '(%s)' % t
        elif tt[0] == 'xor_expr':
            if len(tt) > 3:
                for x in tt[1:]:
                    if x[0] == 'arith_expr':
                        return '(%s)' % t
            else:
                return t
    return t

def ensurebare(t):
    """Strip redundant outer braces from string expression (where possible).
    Written by Robert Clewley"""
    t = trysimple(t)
    try:
        if t[0] == '(' and t[-1] == ')':
            if t[1:-1].find('(') < t[1:-1].find(')'):
                return t[1:-1]
            else:
                # the outer braces do not match each other
                return t
        else:
            return t
    except IndexError:
        return t

def simplify(t):
    """Re-arrange 'term' or 'arith_expr' ASTs to combine numbers.
    Numbers go first unless, in a sum, the numeric term is negative and
    not all the remaining terms are negative."""
    if t[0] == 'arith_expr':
        args = [simplify(a) for a in t[1::2]]
        numargs = len(args)
        if args[0][0] == 'factor' and args[0][1][0] == 'MINUS':
            ops = [-1]
            # strip the unary minus from the first argument
            args[0] = args[0][2]
        else:
            ops = [1]
        for op_t in t[2::2]:
            if op_t[0] == 'PLUS':
                ops.append(1)
            else:
                ops.append(-1)
        num_ixs = []
        oth_ixs = []
        for i, a in enumerate(args):
            if a[0] == 'NUMBER':
                num_ixs.append(i)
            else:
                oth_ixs.append(i)
        res_num = '0'
        # combine the numeric terms
        for nix in num_ixs:
            if ops[nix] > 0:
                res_num = doadd(res_num, ast2string(simplify(args[nix])))
            else:
                res_num = dosub(res_num, ast2string(simplify(args[nix])))
        # combine the remaining terms
        res_oth = '0'
        for oix in oth_ixs:
            if ops[oix] > 0:
                res_oth = doadd(res_oth, ast2string(simplify(args[oix])))
            else:
                res_oth = dosub(res_oth, ast2string(simplify(args[oix])))
        if res_num[0] == '-' and res_oth[0] != '-':
            # put the negative numeric part last
            return string2ast(doadd(res_oth, res_num))
        else:
            return string2ast(doadd(res_num, res_oth))
    elif t[0] == 'term':
        args = [simplify(a) for a in t[1::2]]
        numargs = len(args)
        ops = [1]
        for op_t in t[2::2]:
            if op_t[0] == 'STAR':
                ops.append(1)
            else:
                ops.append(-1)
        num_ixs = []
        oth_ixs = []
        for i, a in enumerate(args):
            if a[0] == 'NUMBER':
                num_ixs.append(i)
            else:
                oth_ixs.append(i)
        res_numerator = '1'
        res_denominator = '1'
        # numeric factors, split by multiply vs. divide
        for nix in num_ixs:
            if ops[nix] > 0:
                res_numerator = domul(res_numerator, ast2string(simplify(args[nix])))
            else:
                res_denominator = domul(res_denominator, ast2string(simplify(args[nix])))
        # all remaining factors
        for oix in oth_ixs:
            if ops[oix] > 0:
                res_numerator = domul(res_numerator, ast2string(simplify(args[oix])))
            else:
                res_denominator = domul(res_denominator, ast2string(simplify(args[oix])))
        return string2ast(dodiv(res_numerator, res_denominator))
    else:
        return t
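
# Intended effect of simplify, sketched on string forms (illustrative;
# relies on the string2ast/ast2string/doadd helpers this module defines
# elsewhere):
#   ast2string(simplify(string2ast('x*2*3')))  ->  '6*x'
#   ast2string(simplify(string2ast('x+1+2')))  ->  '3+x'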


882 """Abstract class for hassle-free symbol re-mappings."""
884 if isinstance(symbolMap, symbolMapClass):
885 self.lookupDict = copy(symbolMap.lookupDict)
886 elif symbolMap is None:
887 self.lookupDict = {}
888 else:
889 self.lookupDict = copy(symbolMap)
890
892 if isinstance(arg, str):
893 if arg in self.lookupDict:
894 return self.lookupDict[arg]
895 else:
896 try:
897 po = parserObject(arg, False)
898 except:
899
900 return arg
901 else:
902 if len(po.tokenized) <= 1:
903
904 return arg
905 else:
906 return "".join(mapNames(self,po.tokenized))
907 elif hasattr(arg, 'mapNames'):
908
909 res = copy(arg)
910 res.mapNames(self)
911 return res
912 elif hasattr(arg, 'coordnames'):
913
914
915 res = copy(arg)
916 try:
917 res.mapNames(self)
918 except AttributeError:
919 raise TypeError("symbolMapClass does not know how to "
920 "process this type of argument")
921 return res
922 elif hasattr(arg, 'iteritems'):
923
924 try:
925 res = copy(arg)
926 except TypeError:
927
928 res = arg
929 try:
930 for k, v in arg.iteritems():
931 new_k = self.__call__(k)
932 new_v = self.__call__(v)
933 res[new_k] = new_v
934
935 if k != new_k:
936 del res[k]
937 except TypeError:
938
939 if isinstance(arg, tuple):
940 return tuple([self.__getitem__(v) for v in arg])
941 else:
942 return arg
943 except:
944 raise TypeError("symbolMapClass does not know how to "
945 "process this type of argument")
946 return res
947 else:
948
949
950 try:
951 res = copy(arg)
952 except TypeError:
953
954 res = arg
955 try:
956 for i, v in enumerate(arg):
957
958 res[i] = self(v)
959 except TypeError:
960
961 if isinstance(arg, tuple):
962 return tuple([self(v) for v in arg])
963 else:
964 try:
965 return self.__getitem__(res)
966 except:
967 raise TypeError("symbolMapClass does not know how to "
968 "process this type of argument (%s)"%str(type(arg)))
969 except:
970 try:
971 return self.__getitem__(res)
972 except:
973 raise TypeError("symbolMapClass does not know how to "
974 "process this type of argument (%s)"%str(type(arg)))
975 else:
976 return res

    def __eq__(self, other):
        try:
            return self.lookupDict == other.lookupDict
        except AttributeError:
            return False

    def __ne__(self, other):
        return not self.__eq__(other)

    def __setitem__(self, symbol, mappedsymbol):
        self.lookupDict[symbol] = mappedsymbol

    def __getitem__(self, symbol):
        try:
            return self.lookupDict[symbol]
        except (KeyError, TypeError):
            return symbol

    def __delitem__(self, symbol):
        del self.lookupDict[symbol]

    def keys(self):
        return self.lookupDict.keys()

    def values(self):
        return self.lookupDict.values()

    def items(self):
        return self.lookupDict.items()

    def update(self, amap):
        try:
            # assume a symbolMapClass argument
            self.lookupDict.update(amap.lookupDict)
        except AttributeError:
            # assume a dictionary argument
            self.lookupDict.update(amap)

    def argsort_keys(self):  # NB: definition line lost in source; name reconstructed
        """Return numpy array of indices that can be used to re-order
        a list of values that have been sorted by this symbol map object,
        so that the list becomes ordered according to the alphabetical
        order of the map's keys.
        """
        keys, vals = sortedDictLists(self.lookupDict, byvalue=False)
        return np.argsort(vals)

    def __len__(self):
        return len(self.lookupDict)

    def __repr__(self):
        return "Symbol mapping"

    __str__ = __repr__

    def __contains__(self, k):
        return k in self.lookupDict
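
# Usage sketch for symbolMapClass (illustrative):
#   sm = symbolMapClass({'x': 'y'})
#   sm('x')         ->  'y'
#   sm(['x', 'z'])  ->  ['y', 'z']    (unmapped symbols pass through)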


class auxfnDBclass(object):
    """Auxiliary function database, for use by parsers."""
    def __init__(self):
        self.auxnames = {}

    def addAuxFn(self, auxfnName, parserObj):
        if parserObj not in self.auxnames:
            self.auxnames[parserObj] = auxfnName
        else:
            raise ValueError("Parser object " + parserObj.name + " already "
                             "exists in auxiliary function database")

    def __repr__(self):
        return "ModelSpec internal helper class: auxfnDBclass object"

    __str__ = __repr__

    def __call__(self, parserObj=None):
        if parserObj is None:
            # return all registered auxiliary function names
            return self.auxnames.values()
        else:
            try:
                return [self.auxnames[parserObj]]
            except KeyError:
                return []

    def removeAuxFn(self, auxfnName):  # NB: definition line lost in source; name reconstructed
        flagdelete = None
        for k, v in self.auxnames.iteritems():
            if v == auxfnName:
                flagdelete = k
                break
        if flagdelete is not None:
            del self.auxnames[flagdelete]

    def clear(self, parserObj):
        if parserObj in self.auxnames:
            del self.auxnames[parserObj]


# all parsers share a single auxiliary function database
global protected_auxnamesDB
protected_auxnamesDB = auxfnDBclass()


1111 """Alphanumeric symbol (pseudo-)parser for mathematical expressions.
1112
1113 An AST is not properly implemented -- rather, we tokenize,
1114 identify free symbols, and apply a small number of syntactic rule checks.
1115 The target language parser is relied upon for full syntax checking.
1116 """
1117
1118 - def __init__(self, specStr, includeProtected=True,
1119 treatMultiRefs=False, ignoreTokens=[],
1120 preserveSpace=False):
1121
1122
1123 self.usedSymbols = []
1124 self.freeSymbols = []
1125 self.preserveSpace = preserveSpace
1126
1127 self.tokenized = []
1128 if type(specStr) is str:
1129 self.specStr = specStr
1130 else:
1131 print "Found type", type(specStr), ": ", specStr
1132 raise TypeError("specStr must be a string")
1133 self.treatMultiRefs = treatMultiRefs
1134
1135 self.ignoreTokens = copy(ignoreTokens)
1136 self.includeProtected = includeProtected
1137
1138
1139 self.parse(ignoreTokens, None, includeProtected, reset=True)
1140
1141
1143 """Function to verify whether an expression is 'compound',
1144 in the sense that it has an operator at the root of its syntax
1145 parse tree (i.e. not inside braces)."""
1146 result = False
1147 nested = 0
1148 if len(self.tokenized) > 2:
1149 stage = 0
1150 for s in self.tokenized:
1151 if stage == 0:
1152 if s in self.usedSymbols and nested == 0:
1153 stage = 1
1154 elif s == ')':
1155 stage = 1
1156 nested = max([0, nested-1])
1157 elif s == '(':
1158 nested += 1
1159 elif stage == 1:
1160 if s in ops and nested == 0:
1161 stage = 2
1162 elif s == '(':
1163 nested += 1
1164 elif s == ')':
1165 nested = max([0, nested-1])
1166 elif nested == 0:
1167 stage = 0
1168 elif stage == 2:
1169 if s in self.usedSymbols and nested == 0:
1170 stage = 3
1171 elif s == '(':
1172 stage = 3
1173 nested += 1
1174 elif s == ')':
1175 nested = max([0, nested-1])
1176 elif nested == 0:
1177 stage = 0
1178 if stage == 3:
1179 result = True
1180 break
1181 return result
1182
    def __call__(self, specialtoks=None, symbolMap=None, includeProtected=True):
        if specialtoks is None:
            if self.ignoreTokens is not None:
                specialtoks = self.ignoreTokens
            else:
                specialtoks = []
        if self.tokenized == []:
            return self.parse(specialtoks, symbolMap, includeProtected)
        else:
            if symbolMap is None:
                return "".join(self.tokenized)
            else:
                return "".join(symbolMap(self.tokenized))

    def find(self, token):
        """Find all occurrences of the given token in the expression,
        returning a list of indices (empty if the token is not present).
        """
        if self.tokenized == []:
            self.parse([])
        return [i for i, t in enumerate(self.tokenized) if t == token]

    def parse(self, specialtoks, symbolMap=None, includeProtected=True,
              reset=False):
        if reset:
            self.usedSymbols = []
            self.freeSymbols = []
        if symbolMap is None:
            # default to the identity mapping
            symbolMap = lambda x: x
        specialtokens = specialtoks + ['('] + self.usedSymbols
        if includeProtected:
            specialtokens.extend(protected_allnames)
            protected_auxnames = protected_auxnamesDB(self)
        else:
            protected_auxnames = []
        if self.treatMultiRefs:
            specialtokens.append('[')
        dohierarchical = '.' not in specialtokens
        allnames = specialtokens + protected_auxnames
        specstr = self.specStr
        returnstr = ""
        if specstr == "":
            # can't parse an empty string
            specstr = " "
        elif specstr[-1] != ')':
            # pad with a space so that the final token is guaranteed
            # to be terminated inside the main loop
            specstr += " "
        scount = 0
        speclen = len(specstr)
        # accumulated used and free symbols
        used = copy(self.usedSymbols)
        free = copy(self.freeSymbols)
        tokenized = []
        # s holds the token currently being built up
        s = ''
        foundtoken = False
        while scount < speclen:
            stemp = specstr[scount]
            scount += 1
            if name_chars_RE.match(stemp) is None:
                # found a non-alphanumeric char, so token s (if any) is
                # complete, unless stemp continues a decimal number or a
                # hierarchical name
                if s not in ['', ' ', '\n', '\t']:
                    if s in allnames:
                        snew = symbolMap(s)
                        tokenized.append(snew)
                        if snew not in used:
                            used.append(snew)
                        returnstr += snew
                    else:
                        # s may continue as a decimal number, a number in
                        # scientific notation, or a hierarchical name
                        isnumtok = isNumericToken(s)
                        issimpledec = stemp == '.' and isnumtok
                        isexpdec = s[-1] in ['e', 'E'] and s[0] not in ['e', 'E'] \
                                   and stemp in ['-', '+'] and isnumtok
                        isdecimal = issimpledec or isexpdec
                        ishierarchicalname = stemp == '.' and isNameToken(s)
                        if isdecimal or (ishierarchicalname and dohierarchical):
                            # token continues through stemp
                            s += stemp
                            continue
                        else:
                            # s is a complete token not in allnames, so it
                            # is a free symbol (unless it is a number)
                            snew = symbolMap(s)
                            if s[0] not in num_chars + ['+', '-'] \
                               and snew not in free:
                                free.append(snew)
                            tokenized.append(snew)
                            if snew not in used:
                                used.append(snew)
                            returnstr += snew
                if stemp in ['+', '-']:
                    # may be the sign of a number or a binary operation
                    try:
                        next_stemp = specstr[scount]
                    except IndexError:
                        tokenized.append(stemp)
                        returnstr += stemp
                        s = ''
                        continue
                    if (tokenized == [] or tokenized[-1] == '(') and \
                       next_stemp in num_chars + ['.']:
                        # unary sign of a literal number
                        s += stemp
                        continue
                    elif len(tokenized) > 0 and tokenized[-1] == '+':
                        # collapse '+-' to '-' (and keep '++' as '+')
                        if stemp == '-':
                            tokenized[-1] = '-'
                            returnstr = returnstr[:-1] + '-'
                        s = ''
                        continue
                    elif len(tokenized) > 0 and tokenized[-1] == '-':
                        # collapse '--' to '+' (and keep '-+' as '-')
                        if stemp == '-':
                            tokenized[-1] = '+'
                            returnstr = returnstr[:-1] + '+'
                        s = ''
                        continue
                    else:
                        tokenized.append(stemp)
                        returnstr += stemp
                        s = ''
                        continue
                elif stemp in ['`', '!', '@', '#', '$', '{',
                               '}', "\\"]:
                    if stemp in specialtokens:
                        tokenized.append(stemp)
                        returnstr += stemp
                        s = ''
                        continue
                    else:
                        print "Problem with string '%s'" % specstr
                        raise ValueError('Symbol %s is illegal. ' % stemp)
                elif stemp == '[':
                    # start of a multiple-Quantity reference clause or a
                    # vector definition
                    if self.treatMultiRefs and len(tokenized) > 0 \
                       and (tokenized[-1].isalnum() or
                            ('[' in specialtokens and not (
                                isVectorClause(specstr[scount-1:]) or
                                len(tokenized) > 1 and tokenized[-2] in
                                ('max', 'min', 'max_', 'min_')))):
                        # begin a multiRef index clause; the specialtokens
                        # check below consumes the rest of the clause
                        s = '['
                    elif '[' in specialtokens:
                        returnstr += '['
                        tokenized.append('[')
                        s = ''
                        continue
                    else:
                        raise ValueError("Syntax error: Square braces not to "
                                         "be used outside of multiple Quantity"
                                         " definitions and references")
                else:
                    if stemp == "*":
                        if len(returnstr) > 1 and returnstr[-1] == "*":
                            # merge this '*' with the previous one into '**'
                            if tokenized[-1] == '*':
                                tokenized[-1] = '**'
                            else:
                                tokenized.append('**')
                            s = ''
                            returnstr += stemp
                            continue
                        else:
                            tokenized.append('*')
                    elif stemp == "=":
                        if len(returnstr) > 1:
                            # merge with a preceding '>' or '<'
                            if returnstr[-1] == ">":
                                if tokenized[-1] == '>':
                                    tokenized[-1] = '>='
                                else:
                                    tokenized.append('>=')
                                s = ''
                                returnstr += stemp
                                continue
                            elif returnstr[-1] == "<":
                                if tokenized[-1] == '<':
                                    tokenized[-1] = '<='
                                else:
                                    tokenized.append('<=')
                                s = ''
                                returnstr += stemp
                                continue
                            else:
                                tokenized.append('=')
                        else:
                            tokenized.append('=')
                    elif stemp in [" ", "\t", "\n"]:
                        if self.preserveSpace: tokenized.append(stemp)
                    else:
                        tokenized.append(stemp)
                    s = ''
                    returnstr += stemp
                    continue
            else:
                s += stemp
            if s in specialtokens:
                # s may be a complete special token, or the beginning of a
                # longer name; decide by looking at the next character
                if s == '[' and self.treatMultiRefs and len(tokenized) > 0 \
                   and (tokenized[-1].isalnum() or
                        ('[' in specialtokens and
                         not isVectorClause(specstr[scount-1:]))):
                    # it's a multiRef index clause: consume everything up to
                    # the closing right brace as a single token
                    foundtoken = False
                    try:
                        rbpos = specstr[scount:].index(']')
                    except ValueError:
                        raise ValueError("Mismatch [ and ] in spec")
                    expr = specstr[scount-1:scount+rbpos+1]
                    # the clause must contain exactly one free index name
                    temp = parserObject(expr[1:-1],
                                        includeProtected=False)
                    if len(temp.freeSymbols) == 1:
                        free.extend(temp.freeSymbols)
                    else:
                        raise ValueError("Invalid index clause in "
                                         "multiple quantity reference -- "
                                         "multiple index names used in [...]")
                    scount += rbpos + 1
                    # record the whole clause as one token
                    returnstr += expr
                    tokenized.append(expr)
                    used.append(expr)
                    s = ''
                else:
                    if scount < speclen - 1:
                        if name_chars_RE.match(specstr[scount]) is None:
                            foundtoken = True
                        else:
                            if s[-1] in ['e', 'E'] and s[0] not in ['e', 'E'] and \
                               name_chars_RE.match(specstr[scount]).group() \
                               in num_chars + ['-', '+']:
                                # the token ends a mantissa and a numeric
                                # exponent follows, e.g. 1e5
                                foundtoken = True
                    else:
                        foundtoken = True
                if foundtoken:
                    if includeProtected:
                        if s == 'for':
                            # the 'for' macro: validate and consume its
                            # argument clause
                            if specstr[scount] != '(':
                                print "Next char found:", specstr[scount]
                                raise ValueError("Invalid 'for' macro syntax")
                            try:
                                rbpos = specstr[scount:].index(')')
                            except ValueError:
                                raise ValueError("Mismatch ( and ) in 'for' "
                                                 "macro")
                            expr = specstr[scount:scount+rbpos+1]
                            # validate the macro's sub-clauses
                            temp = parserObject(expr, includeProtected=False)
                            macrotests = [len(temp.tokenized) == 7,
                                          temp.tokenized[2] == temp.tokenized[4] == ',',
                                          temp.tokenized[5] in ['+', '*']]
                            if not alltrue(macrotests):
                                print "specstr was: ", specstr
                                print "tokens: ", temp.tokenized
                                print "test results: ", macrotests
                                raise ValueError("Invalid sub-clause in "
                                                 "'for' macro")
                            scount += rbpos + 1
                            returnstr += s + expr
                            tokenized.extend([s, expr])
                            if s not in used:
                                used.append(s)
                            if expr not in used:
                                used.append(expr)
                        elif s == 'abs':
                            snew = symbolMap(s)
                            returnstr += snew
                            tokenized.append(snew)
                            if snew not in used:
                                used.append(snew)
                        elif s in protected_scipynames + protected_specialfns:
                            snew = symbolMap(s)
                            returnstr += snew
                            tokenized.append(snew)
                            if snew not in used:
                                used.append(snew)
                        elif s in protected_mathnames:
                            if s in ['e', 'E']:
                                # special case: 'e' may be the math constant
                                # or part of a number's exponent
                                if len(returnstr) > 0:
                                    if returnstr[-1] not in num_chars + ['.']:
                                        snew = symbolMap(s.lower())
                                        returnstr += snew
                                        tokenized.append(snew)
                                        if snew not in used:
                                            used.append(snew)
                                    else:
                                        returnstr += s
                                else:
                                    snew = symbolMap(s.lower())
                                    returnstr += snew
                                    tokenized.append(snew)
                                    if snew not in used:
                                        used.append(snew)
                            else:
                                snew = symbolMap(s)
                                returnstr += snew
                                tokenized.append(snew)
                                if snew not in used:
                                    used.append(snew)
                        elif s in protected_randomnames:
                            snew = symbolMap(s)
                            returnstr += snew
                            tokenized.append(snew)
                            if snew not in used:
                                used.append(snew)
                        elif s in protected_auxnames:
                            snew = symbolMap(s)
                            returnstr += snew
                            tokenized.append(snew)
                        else:
                            # any other special token is mapped and
                            # recorded as-is
                            snew = symbolMap(s)
                            tokenized.append(snew)
                            returnstr += snew
                    else:
                        # protected names are not being distinguished
                        snew = symbolMap(s)
                        tokenized.append(snew)
                        returnstr += snew
                    s = ''
                    foundtoken = False
        # end of the main scan loop
        if reset:
            # a recorded free symbol is only genuinely free if it survived
            # into the tokenized list and is not inside a string literal
            actual_free = [sym for sym in free if sym in tokenized]
            for sym in [sym for sym in free if sym not in actual_free]:
                is_literal = False
                for tok in tokenized:
                    # does sym only occur quoted inside this token?
                    if ('"' in tok or "'" in tok) and sym in tok:
                        start_ix = tok.index(sym)
                        end_ix = start_ix + len(sym) - 1
                        if len(tok) > end_ix + 1 and start_ix > 0:
                            # check for matching quote chars on each side
                            doub = tok[start_ix-1] == tok[end_ix+1] == '"'
                            sing = tok[start_ix-1] == tok[end_ix+1] == "'"
                            is_literal = doub or sing
                if not is_literal:
                    actual_free.append(sym)
            self.usedSymbols = used
            self.freeSymbols = actual_free
            self.specStr = returnstr
            self.tokenized = tokenized
        return returnstr.strip()
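
# Usage sketch for parserObject (indicative values):
#   p = parserObject('2*sin(x)+y', includeProtected=True)
#   p.tokenized    ->  ['2', '*', 'sin', '(', 'x', ')', '+', 'y']
#   p.freeSymbols  ->  ['x', 'y']    ('sin' is protected, '2' is a number)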


def isToken(s, treatMultiRefs=False):
    """Determine whether string s is a single token."""
    if not isinstance(s, str):
        return False
    try:
        temp = parserObject(s, includeProtected=False,
                            treatMultiRefs=treatMultiRefs)
    except ValueError:
        return False
    if treatMultiRefs and s.find('[') > 0:
        # a multiRef like 'name[i]' tokenizes as two entries
        lenval = 2
    else:
        lenval = 1
    return not temp.isCompound() and len(temp.usedSymbols) == lenval \
           and len(temp.freeSymbols) == lenval \
           and len(temp.tokenized) == lenval

def isNameToken(s, treatMultiRefs=False):
    """Determine whether string s is a valid name token (not a number)."""
    return isToken(s, treatMultiRefs=treatMultiRefs) \
           and s[0] not in num_chars and not (len(s) == 1 and s == "_")

def isIntegerToken(arg):
    return alltrue([t in '0123456789' for t in arg])

def isNumericToken(arg):
    """Determine whether string arg represents a single (possibly signed,
    possibly floating-point) number."""
    try:
        s = arg.lower()
    except AttributeError:
        return False
    try:
        if s[0] in ['+', '-']:
            s_rest = s[1:]
        else:
            s_rest = s
    except IndexError:
        return False
    pts = s.count('.')
    exps = s.count('e')
    pm = s_rest.count('+') + s_rest.count('-')
    if pts > 1 or exps > 1 or pm > 1:
        return False
    if exps == 1:
        exp_pos = s.find('e')
        pre_exp = s[:exp_pos]
        # must have digits before and after the 'e'
        if not sometrue([n in num_chars for n in pre_exp]):
            return False
        if s[-1] == 'e':
            # no digits after the 'e'!
            return False
        if not sometrue([n in num_chars for n in s[exp_pos:]]):
            return False
        # any additional +/- must come directly after the 'e'
        if pm == 1:
            pm_pos = max([s_rest.find('+'), s_rest.find('-')])
            if s_rest[pm_pos-1] != 'e':
                return False
            e_rest = s_rest[pm_pos+1:]
        else:
            e_rest = s[exp_pos+1:]
        # the exponent must be an integer
        if '.' in e_rest:
            return False
    # cannot have a +/- without an exponent
    if pm == 1 and exps == 0:
        return False
    return alltrue([n in num_chars + ['.', 'e', '+', '-'] for n in s_rest])
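
# e.g. (illustrative):
#   isNumericToken('3.5e-4')  ->  True
#   isNumericToken('1.2.3')   ->  False  (two decimal points)
#   isNameToken('k1')         ->  True
#   isNameToken('2k')         ->  False  (leading digit)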


def findNumTailPos(s):
    """Find position of numeric tail in alphanumeric string.

    e.g. findNumTailPos('abc678') = 3"""
    try:
        l = len(s)
        if l > 1:
            if s[-1] not in num_chars or s[0] in num_chars:
                raise ValueError("Argument must be an alphanumeric string "
                                 "starting with a letter and ending in a number")
            for i in range(1, l+1):
                if s[-i] not in num_chars:
                    return l-i+1
        else:
            raise ValueError("Argument must be an alphanumeric string starting "
                             "with a letter and ending in a number")
    except TypeError:
        raise ValueError("Argument must be an alphanumeric string starting "
                         "with a letter and ending in a number")


def isHierarchicalName(s, sep=NAMESEP, treatMultiRefs=False):
    s_split = s.split(sep)
    return len(s_split) > 1 and alltrue([isNameToken(t, treatMultiRefs)
                                         for t in s_split])

def isVectorClause(s):
    brace = findEndBrace(s, '[', ']')
    if s[0] == '[' and isinstance(brace, int):
        return ',' in s[1:brace]


def replaceSepList(speclist):
    return [replaceSep(spec) for spec in speclist]

def replaceSepListInv(speclist):
    return [replaceSepInv(spec) for spec in speclist]

def replaceSepInv(spec):
    """Invert default name separator replacement."""
    return replaceSep(spec, "_", NAMESEP)


def replaceSep(spec, sourcesep=NAMESEP, targetsep="_"):
    """Replace hierarchy separator character with another and return spec
    string. e.g. "." -> "_"
    Only replaces the character between name tokens, not between numbers."""
    # separators must be non-alphanumeric
    for char in sourcesep:
        if alphabet_chars_RE.match(char) is not None:
            raise ValueError("Source separator must be non-alphanumeric")
    for char in targetsep:
        if alphabet_chars_RE.match(char) is not None:
            raise ValueError("Target separator must be non-alphanumeric")
    if isinstance(spec, str):
        return replaceSepStr(spec, sourcesep, targetsep)
    else:
        # spec is a QuantSpec object: use its string form
        return replaceSepStr(str(spec()), sourcesep, targetsep)
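
# Usage sketch (illustrative; note that the tokenizer discards spaces):
#   replaceSep('node.v+k')     ->  'node_v+k'
#   replaceSepInv('node_v+k')  ->  'node.v+k'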
1742
1743
1745 """Map names in <target> argument using the symbolMapClass
1746 object <themap>, returning a renamed version of the target.
1747 N.B. Only maps the keys of a dictionary type"""
1748 try:
1749 themap.lookupDict
1750 except AttributeError:
1751 t = repr(type(themap))
1752 raise TypeError("Map argument must be of type symbolMapClass, not %s"%t)
1753 if hasattr(target, 'mapNames'):
1754 ct = copy(target)
1755 ct.mapNames(themap)
1756 return ct
1757 elif isinstance(target, list):
1758 return themap(target)
1759 elif isinstance(target, tuple):
1760 return tuple(themap(target))
1761 elif hasattr(target, 'iteritems'):
1762 o = {}
1763 for k, v in target.iteritems():
1764 o[themap(k)] = v
1765 return o
1766 elif isinstance(target, str):
1767 return themap(target)
1768 elif target is None:
1769 return None
1770 else:
1771 raise TypeError("Invalid target type %s"%repr(type(target)))


def replaceSepStr(spec, sourcesep, targetsep):
    # older implementation operating directly on an iterable of tokens;
    # shadowed by the string-based version defined below
    outstr = ""
    try:
        for t in spec[:]:
            if isHierarchicalName(t, sourcesep):
                outstr += t.replace(sourcesep, targetsep)
            else:
                # not a hierarchical name: pass through unchanged
                outstr += t
    except AttributeError:
        raise ValueError("Invalid QuantSpec passed to replaceSep()")
    return outstr


def replaceSepStr(spec, sourcesep, targetsep):
    # tokenize the spec string, then replace separators that appear
    # between name tokens only
    outstr = ""
    treatMultiRefs = '[' in spec and ']' in spec
    p = parserObject(spec, treatMultiRefs=treatMultiRefs,
                     includeProtected=False)
    # state: 0 = not in a name, 1 = just seen a name token,
    #        2 = seen a name token followed by the source separator
    state = 0
    for t in p.tokenized:
        if isNameToken(t):
            if sourcesep in t:
                # hierarchical name token: replace the separators inside it
                tsplit = t.split(sourcesep)
                if alltrue([isNameToken(ts) for ts in tsplit]):
                    outstr += targetsep.join(tsplit)
                else:
                    outstr += t
            else:
                if state == 0:
                    state = 1
                    outstr += t
                elif state == 1:
                    state = 0
                    outstr += t
                else:
                    # state 2: emit the replaced separator before the name
                    state = 1
                    outstr += targetsep + t
                continue
        elif t == sourcesep:
            if state == 1:
                state = 2
                # defer output until we know a name token follows
            else:
                # separator not between names: emit unchanged
                state = 0
                outstr += t
        else:
            state = 0
            outstr += t
    return outstr


def joinStrs(strlist):
    """Join a list of strings into a single string (in order)."""
    return ''.join(strlist)


def joinAsStrs(objlist, sep=""):
    """Join a list of objects in their string representational form."""
    retstr = ''
    for o in objlist:
        if type(o) is str:
            retstr += o + sep
        else:
            retstr += str(o) + sep
    avoidend = len(sep)
    if avoidend > 0:
        # drop the trailing separator
        return retstr[:-avoidend]
    else:
        return retstr


def count_sep(specstr, sep=','):
    """Count number of specified separators (default ',') in the given
    string, ignoring occurrences of the separator inside nested braces."""
    num_seps = 0
    brace_depth = 0
    for s in specstr:
        if s == sep and brace_depth == 0:
            num_seps += 1
        elif s == '(':
            brace_depth += 1
        elif s == ')':
            brace_depth -= 1
    return num_seps
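
# e.g. (illustrative): count_sep('a,f(x,y),b') -> 2
# (the comma inside f(x,y) is at brace depth 1 and is not counted)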


def parseMatrixStrToDictStr(specstr, specvars, m=0):
    """Convert a string representation of an m-by-n matrix, given as a
    nested comma-delimited list, into a dictionary of the row sub-strings,
    keyed by the ordered list of names specvars (one name per row)."""
    specdict = {}
    # number of rows to extract
    n = len(specvars)
    if n == 0:
        raise ValueError("parseMatrixStrToDictStr: specvars was empty")
    if m == 0:
        # default to a square matrix
        m = len(specvars)
    # strip whitespace and the outer [ ] pair
    spectemp1 = specstr.strip()
    assert spectemp1[0] == '[' and spectemp1[-1] == ']', \
           ("Matrix must be supplied as a Python matrix, using [ and ] syntax")
    spectemp2 = spectemp1[1:-1].replace(' ', '').replace('\n', '')
    splitdone = False
    entrycount = 0
    startpos = 0
    try:
        while not splitdone:
            endbr = findEndBrace(spectemp2[startpos:], '[', ']')
            if endbr is None:
                raise ValueError("Mismatched braces before end of string")
            nextrbrace = endbr + startpos
            specdict[specvars[entrycount]] = \
                spectemp2[startpos:nextrbrace+1]
            entrycount += 1
            if entrycount < n:
                nextcomma = spectemp2.find(',', nextrbrace)
                if nextcomma > 0:
                    nextlbrace = spectemp2.find('[', nextcomma)
                    if nextlbrace > 0:
                        startpos = nextlbrace
                    else:
                        raise ValueError("Not enough comma-delimited entries")
                else:
                    raise ValueError("Not enough comma-delimited entries")
            else:
                splitdone = True
    except:
        print "Error in matrix specification"
        raise
    return specdict
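
# Usage sketch (illustrative):
#   parseMatrixStrToDictStr('[[1,2],[3,4]]', ['row0', 'row1'])
#     ->  {'row0': '[1,2]', 'row1': '[3,4]'}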


def readArgs(argstr, lbchar='(', rbchar=')'):
    """Parse arguments out of a string beginning and ending with braces
    (default: round braces).

    Returns a triple: [success_boolean, list of arguments, length of argstr]"""
    bracetest = argstr[0] == lbchar and argstr[-1] == rbchar
    rest = argstr[1:-1].replace(" ", "")
    pieces = []
    while True:
        if '(' in rest:
            lix = rest.index('(')
            rix = findEndBrace(rest[lix:]) + lix
            new = rest[:lix].split(",")
            if len(pieces) > 0:
                pieces[-1] = pieces[-1] + new[0]
                pieces.extend(new[1:])
            else:
                pieces.extend(new)
            if len(pieces) > 0:
                pieces[-1] = pieces[-1] + rest[lix:rix+1]
            else:
                pieces.append(rest[lix:rix+1])
            rest = rest[rix+1:]
        else:
            new = rest.split(",")
            if len(pieces) > 0:
                pieces[-1] = pieces[-1] + new[0]
                pieces.extend(new[1:])
            else:
                pieces.extend(new)
            break
    return [bracetest, pieces, len(argstr)]


def findEndBrace(s, lbchar='(', rbchar=')'):
    """Find position in string (or list of strings) s at which the brace
    matching s[0] occurs (if at all). If not found, returns None.

    s[0] must be the left brace character. Default left and right braces are
    '(' and ')'. Change them with the optional second and third arguments.
    """
    pos = 0
    assert s[0] == lbchar, 'string argument must begin with left brace'
    stemp = s
    leftbrace_count = 0
    notDone = True
    while len(stemp) > 0 and notDone:
        # find the next brace of either kind
        try:
            left_pos = stemp.index(lbchar)
        except ValueError:
            left_pos = -1
        try:
            right_pos = stemp.index(rbchar)
        except ValueError:
            right_pos = -1
        if left_pos >= 0 and (right_pos < 0 or left_pos < right_pos):
            # a left brace comes first: increase the nesting count
            leftbrace_count += 1
            pos += left_pos + 1
        elif right_pos >= 0:
            # a right brace comes first: decrease the nesting count
            leftbrace_count -= 1
            pos += right_pos + 1
        else:
            # neither brace found before the end of the string
            raise ValueError('End of string found before closing brace')
        stemp = s[pos:]
        if leftbrace_count == 0:
            notDone = False
            # adjust pos to index the closing brace itself
            pos -= 1
    if leftbrace_count == 0:
        return pos
    else:
        return None
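
# e.g. (illustrative): findEndBrace('(a*(b+c))+d') -> 8, the index of the
# ')' matching the '(' at position 0.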


def makeParList(objlist, prefix=''):
    """Wrap objlist into a comma-separated string of prefixed str(object)s."""
    parlist = ', '.join(map(lambda i: prefix+str(i), objlist))
    return parlist
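
# e.g. (illustrative): makeParList([0, 1, 2], 'p') -> 'p0, p1, p2'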


def wrapArgInCall(source, callfn, wrapL, wrapR=None, argnums=[0],
                  notFirst=False):
    """Add delimiters around single arguments in calls to callfn within
    source. argnums selects which argument positions are wrapped."""
    done = False
    output = ""
    currpos = 0
    first_occurrence = True
    if wrapR is None:
        # if no distinct right-hand delimiter is given, use the same
        # delimiter on both sides
        wrapR = wrapL
    assert isinstance(wrapL, str) and isinstance(wrapR, str), \
           "Supplied delimiters must be strings"
    while not done:
        # find the next call to callfn
        findposlist = [source[currpos:].find(callfn+'(')]
        try:
            findpos = min(filter(lambda x: x >= 0, findposlist)) + currpos
        except ValueError:
            done = True
        if not done:
            # locate the argument list of this call
            startbrace = source[findpos:].find('(') + findpos
            endbrace = findEndBrace(source[startbrace:]) + startbrace
            output += source[currpos:startbrace+1]
            currpos = startbrace + 1
            numargs = source[startbrace+1:endbrace].count(',') + 1
            if max(argnums) >= numargs:
                raise ValueError("Specified argument number out of range")
            if numargs > 1:
                for argix in range(numargs-1):
                    nextcomma = source[currpos:endbrace].find(',')
                    argstr = source[currpos:currpos + nextcomma]
                    # strip surrounding whitespace from the argument
                    argstr = argstr.strip()
                    if argix in argnums:
                        if first_occurrence and notFirst:
                            output += source[currpos:currpos + nextcomma + 1]
                        else:
                            output += wrapL + argstr + wrapR + ','
                        first_occurrence = False
                    else:
                        output += source[currpos:currpos + nextcomma + 1]
                    currpos += nextcomma + 1
                if numargs-1 in argnums:
                    if first_occurrence and notFirst:
                        # skip wrapping in the first occurrence
                        output += source[currpos:endbrace+1]
                    else:
                        # wrap the final argument
                        argstr = source[currpos:endbrace]
                        argstr = argstr.strip()
                        output += wrapL + argstr + wrapR + ')'
                    first_occurrence = False
                else:
                    # final argument is not wrapped
                    output += source[currpos:endbrace+1]
            else:
                if argnums[0] != 0:
                    raise ValueError("Specified argument number out of range")
                if first_occurrence and notFirst:
                    output += source[currpos:endbrace+1]
                else:
                    argstr = source[currpos:endbrace]
                    argstr = argstr.strip()
                    output += wrapL + argstr + wrapR + ')'
                first_occurrence = False
            currpos = endbrace + 1
        else:
            output += source[currpos:]
    return output
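
# Usage sketch (illustrative):
#   wrapArgInCall('f(x,y)', 'f', '"')            ->  'f("x",y)'
#   wrapArgInCall('f(x,y)', 'f', '[', ']', [1])  ->  'f(x,[y])'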
2107
2108
2109
2110
2111
2112
2113
2114
2115
2116
2117
2118
2119
2120
2121
2122
2123
2124
2125
2126
2127
2128
2129
2130
2131
2132
2133
2134
2135
2136
2137
2138
2139
2140
2141
2142
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153
2154
2155
2156
2157
2158
2159
2160
2161
2162
2163
2164
2165
2166
2167
2168
2169
2170
2171
2172
2173
2174
2175
2176
2177
2178
2179
2180
2181
2182
2183
2184
2185
2186
2187
2188
2189
2190
2191
2192
2193
2194
2195
2196
2197
2198
def replaceCallsWithDummies(source, callfns, used_dummies=None, notFirst=False):
    """Replace all function calls in source with dummy names,
    for the functions listed in callfns. Returns a pair (new_source, d),
    where d is a dict mapping the dummy names used to the function calls.
    """
    if used_dummies is None:
        used_dummies = 0
    done = False
    dummies = {}
    output = ""
    currpos = 0
    doneFirst = False
    while not done:
        # candidate positions for a call to each function
        findposlist_candidates = [source[currpos:].find(fname+'(') for fname in callfns]
        findposlist = []
        for candidate_pos in findposlist_candidates:
            # only accept a match if it is not part of a longer name,
            # by checking the preceding character
            if currpos + candidate_pos - 1 >= 0:
                if not isNameToken(source[currpos+candidate_pos-1]):
                    findposlist.append(candidate_pos)
            else:
                # no preceding character to check
                findposlist.append(candidate_pos)
        findposlist = [ix for ix in findposlist if ix >= 0]
        findposlist.sort()
        if not doneFirst and notFirst and len(findposlist) > 0:
            # skip the first occurrence
            findposlist = findposlist[1:]
            doneFirst = True
        try:
            findpos = findposlist[0] + currpos
        except IndexError:
            done = True
        if not done:
            # locate the argument list and process nested calls first
            startbrace = source[findpos:].find('(') + findpos
            endbrace = findEndBrace(source[startbrace:]) + startbrace
            sub_source = source[startbrace+1:endbrace]
            embedded_calls = [sub_source.find(fname+'(') for fname in callfns]
            try:
                subpositions = filter(lambda x: x > 0, embedded_calls)
                if subpositions == []:
                    filtered_sub_source = sub_source
                    new_d = {}
                else:
                    filtered_sub_source, new_d = \
                        replaceCallsWithDummies(sub_source,
                                                callfns, used_dummies)
            except ValueError:
                pass
            else:
                if new_d != {}:
                    dummies.update(new_d)
                    used_dummies = max((used_dummies, max(dummies.keys())))
                used_dummies += 1
                dummies[used_dummies] = source[findpos:startbrace+1] + \
                                        filtered_sub_source + ')'
                output += source[currpos:findpos] + '__dummy%i__' % used_dummies
                currpos = endbrace + 1
        else:
            output += source[currpos:]
    if dummies != {}:
        # process any remaining calls inside the recorded dummies
        new_dummies = dummies.copy()
        for k, v in dummies.items():
            new_v, new_d = replaceCallsWithDummies(v, callfns,
                                                   used_dummies, notFirst=True)
            new_dummies[k] = new_v
            if new_d != {}:
                new_dummies.update(new_d)
                used_dummies = max((used_dummies, max(new_dummies.keys())))
        dummies.update(new_dummies)
    return output, dummies
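
# Usage sketch (illustrative):
#   replaceCallsWithDummies('1+f(2*g(x))', ['f', 'g'])
#     ->  ('1+__dummy2__', {1: 'g(x)', 2: 'f(2*__dummy1__)'})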


def addArgToCalls(source, callfns, arg, notFirst=''):
    """Add an argument arg to calls in source made to the functions listed
    in callfns. If notFirst names one of the functions, the first call to
    it is left unchanged."""
    if isinstance(callfns, list):
        if len(callfns) > 1:
            # process each function name in turn
            res = source
            for f in callfns:
                res = addArgToCalls(res, [f], arg, notFirst)
            return res
    else:
        raise TypeError("Invalid list of function names")
    done = False
    output = ""
    currpos = 0
    while not done:
        # candidate positions for a call to each function
        findposlist_candidates = [source[currpos:].find(fname+'(') for fname in callfns]
        findposlist = []
        for candidate_pos in findposlist_candidates:
            # only accept a match if it is not part of a longer name,
            # by checking the preceding character
            if currpos + candidate_pos - 1 >= 0:
                if not isNameToken(source[currpos+candidate_pos-1]):
                    findposlist.append(candidate_pos)
            else:
                # no preceding character to check
                findposlist.append(candidate_pos)
        try:
            findpos = min(filter(lambda x: x >= 0, findposlist)) + currpos
        except ValueError:
            # no more calls to process
            done = True
        if not done:
            # locate the argument list and process nested calls first
            startbrace = source[findpos:].find('(') + findpos
            endbrace = findEndBrace(source[startbrace:]) + startbrace
            sub_source = source[startbrace+1:endbrace]
            embedded_calls = [sub_source.find(fname+'(') for fname in callfns]
            try:
                subpositions = filter(lambda x: x > 0, embedded_calls)
                if subpositions == []:
                    filtered_sub_source = sub_source
                else:
                    filtered_sub_source = addArgToCalls(sub_source, callfns,
                                                        arg, notFirst)
            except ValueError:
                pass
            # skip adding the argument to the first call of the
            # designated notFirst function
            if currpos == 0 and callfns == [notFirst]:
                notFirst = ''
                addStr = ''
            else:
                if filtered_sub_source == '':
                    addStr = arg
                else:
                    addStr = ', ' + arg
            output += source[currpos:startbrace+1] + filtered_sub_source \
                      + addStr + ')'
            currpos = endbrace + 1
        else:
            output += source[currpos:]
    return output
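
# Usage sketch (illustrative):
#   addArgToCalls('f(x)+g(y,z)', ['f', 'g'], 'p')  ->  'f(x, p)+g(y,z, p)'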


def proper_match(specstr, term):
    """Determine whether string argument 'term' appears one or more times in
    string argument 'specstr' as a proper symbol (not just as part of a longer
    symbol string or a number).
    """
    ix = 0
    term_len = len(term)
    while ix < len(specstr) and term_len > 0:
        found_ix = specstr[ix:].find(term)
        pos = found_ix + ix
        if found_ix > -1:
            try:
                if specstr[pos + term_len] not in [')', '+', '-', '/', '*', ' ',
                                                   ']', ',', '<', '>', '=', '&', '^']:
                    # the match continues into a longer symbol:
                    # skip past it and keep searching
                    ix = pos + term_len
                    continue
            except IndexError:
                # the match ends the string, which is acceptable
                pass
            if isNumericToken(term):
                # a numeric term must not adjoin other numeric characters,
                # otherwise it is just part of a longer number
                if (pos > 0 and specstr[pos-1] in num_chars + ['.', 'e']) or \
                   (pos + term_len < len(specstr) and
                        specstr[pos + term_len] in num_chars + ['.', 'e']):
                    ix = pos + term_len
                    continue
            return True
        else:
            break
    return False
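
# e.g. (illustrative):
#   proper_match('x1+x12', 'x1')  ->  True   (first occurrence is delimited)
#   proper_match('x12+y', 'x1')   ->  False  ('x1' only occurs inside 'x12')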


def remove_indices_from_range(ixs, max_ix):
    """From the indices 0:max_ix+1, remove the individual index values
    given in the (ascending) sequence ixs.
    Returns the remaining ranges of indices and singletons.
    """
    ranges = []
    i0 = 0
    for ix in ixs:
        i1 = ix - 1
        if i1 < i0:
            i0 = ix + 1
        elif i1 == i0:
            # a single surviving index
            ranges.append([i0])
            i0 = ix + 1
        else:
            # a run of surviving indices, as a half-open [start, stop) pair
            ranges.append([i0, i1+1])
            i0 = ix + 1
    if i0 < max_ix:
        ranges.append([i0, max_ix+1])
    elif i0 == max_ix:
        ranges.append([i0])
    return ranges
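
# e.g. (illustrative):
#   remove_indices_from_range([2, 5], 8)  ->  [[0, 2], [3, 5], [6, 9]]
# i.e. indices 0-1, 3-4 and 6-8 survive, as [start, stop) pairs.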