Yappy
Yet another LR(1) parser generator for Python

Rogério Reis, Nelma Moreira
DCC-FC & LIACC, Universidade do Porto, Portugal
2000-2006

What is Yappy?

Yappy provides a lexical analyser and an LR parser generator for Python applications. Currently it builds SLR, LR(1) and LALR(1) parsing tables. Tables are kept in Python shelves for use in parsing. Some ambiguous grammars can be handled if precedence and associativity information is provided.

Yappy can also be useful for teaching and learning LR parsing techniques, although for the moment no special functionalities are built in, apart from some information in the Yappy API documentation.

Yappy is part of the FAdo project (http://www.ncc.up.pt/FAdo).




How to use

To build a parser you must provide:

a tokenizer
the rules for the lexical analyser
a grammar
the grammar productions and the associated semantic actions

After a parser is generated it can be used for parsing strings of the language.

We assume familiarity with some basic notions of formal languages, such as regular expressions, context-free grammars, LR grammars and LR parsing [ASU86,HMU00,GJ90]. Some knowledge of the Python language [Lut96] is also required.


Lexer

The class Lexer implements a lexical analyser based on Python regular expressions. An instance must be initialised with a tokenizer, that is, a list of tuples:

(re,funct,op?)

where:

re
is an uncompiled Python regular expression
funct
the name of a function that returns the pair (TOKEN, SPECIAL_VALUE), where TOKEN is the token to be used by the parser and SPECIAL_VALUE any associated semantic value. If funct equals "" the token is ignored; this can be used for delimiters. The argument is the string matched by re.
op
if present, is a tuple with operator information: (TOKEN,PRECEDENCE,ASSOC) where PRECEDENCE is an integer (less than 10000) and ASSOC the string 'left' or 'right'.

Restriction: if a keyword is a substring of another, its rule must appear after the longer keyword's rule, for the obvious reasons...
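A two-rule sketch of this ordering (the token names eq and assign are illustrative only): since = is a substring of ==, its rule must come second.

l = [("==",lambda x: ("eq",x)),
     ("=",lambda x: ("assign",x)) ]
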
The following list presents a tokenizer for regular expressions:

l = [("\s+",""),
     ("@epsilon",lambda x: (x,x)),
     ("@empty_set",lambda x: (x,x)),
     ("[A-Za-z0-9]",lambda x: ("id",x)),
     ("[+]",lambda x: ("+",x),("+",100,'left')),
     ("[*]",lambda x: (x,x),("*",300,'left')),
     ("\(|\)",lambda x: (x,x)) ]

A lexical analyser is created by instantiating a Lexer class:

>>> from yappy.parser import *
>>> a=Lexer(l)

Scanning

Lexer has two methods for scanning:

scan(): from a string
readscan(): from stdin

>>> from yappy.parser import *
>>> a=Lexer(l)
>>> a.scan("(a + b)* a a b*")
[('(', '('), ('id', 'a'), ('+', '+'), ('id', 'b'), (')', ')'), 
('*', '*'), ('id', 'a'), ('id', 'a'), ('id', 'b'), ('*', '*')]
>>>
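
Scanning from standard input is analogous (a sketch; readscan is assumed here to take no arguments and to read until end of input):

>>> a=Lexer(l)
>>> tokens = a.readscan()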

See the Yappy documentation for more details.


LRparser

The class LRparser implements an LR parser generator. An instance must be initialised with:

grammar
see Section 2.2.1
table_shelve
a file where the parser is saved
no_table
if 0, table_shelve is created even if it already exists; default is 1.
tabletype
type of LR table: SLR (SLRtable), LR1 (LR1table), LALR (LALRtable); for now, LALR cannot be used with empty rules.

operators
provided by Lexer


Grammars

A grammar is a list of tuples

(LeftHandSide,RightHandSide,SemFunc,Prec)
with
LeftHandSide
nonterminal (currently a string)
RightHandSide
a list of symbols (terminals and nonterminals)
SemFunc
a semantic action
Prec
if present, a pair (PRECEDENCE,ASSOC) for conflict disambiguation.

Restriction: The first production is for the start symbol.
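
When a production needs precedence information, Prec is given as the fourth element of the tuple; for instance, the concatenation rule of the ambiguous grammar below could be written in tuple form as:

("reg",["reg","reg"],self.ConcatSemRule,(200,'left'))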

Here is an unambiguous grammar for regular expressions:

grammar = [("r",["r","|","c"],self.OrSemRule),
                        ("r",["c"],self.DefaultSemRule),
                    ("c",["c","s"],self.ConcatSemRule),
                    ("c",["s"],self.DefaultSemRule),
                    ("s",["s","*"],self.StarSemRule),
                    ("s",["f"],self.DefaultSemRule),
                    ("f",["b"],self.DefaultSemRule),
                    ("f",["(","r",")"],self.ParSemRule),
                    ("b",["id"],self.BaseSemRule),
                    ("b",["@empty_set"],self.BaseSemRule),
                    ("b",["@epsilon''],self.BaseSemRule)]

The previous description can be easily rephrased in a more user-friendly manner. We provide two ways:

grules() function
Allows the grammar productions to be described as strings of the form LHS -> RHS, with an empty RHS for empty rules. The rule symbol and the separator of the RHS words can be specified; the default values are -> and whitespace (i.e. the \s+ Python regular expression). If no semantic rule is given, DefaultSemRule is assumed.

The previous grammar can be rewritten as:

 grammar = grules([("r -> r | c",self.OrSemRule),
                   ("r -> c",self.DefaultSemRule),
                   ("c -> c s",self.ConcatSemRule),
                   ("c -> s",self.DefaultSemRule),
                   ("s -> s *",self.StarSemRule),
                   ("s -> f",self.DefaultSemRule),
                   ("f -> b",self.DefaultSemRule),
                   ("f -> ( r )",self.ParSemRule),
                   ("b -> id",self.BaseSemRule),
                   ("b -> @empty_set",self.BaseSemRule),
                   ("b -> @epsilon",self.BaseSemRule)])

We can also write an ambiguous grammar if we provide precedence information that allows conflicts (shift/reduce) to be resolved.

grammar = grules([("reg -> reg | reg",self.OrSemRule),
                  ("reg -> reg reg",self.ConcatSemRule,(200,'left')),
                  ("reg -> reg *",self.StarSemRule),
                  ("reg -> ( reg )",self.ParSemRule),
                  ("reg -> id",self.BaseSemRule),
                  ("reg ->  @empty_set",self.BaseSemRule),
                  ("reg ->  @epsilon",self.BaseSemRule)
                   ])

As a string
that allows multiple productions for a left hand side:
  grammar ="""  reg -> reg + reg {{ self.OrSemRule }} |
               reg reg {{ self.ConcatSemRule }} // 200 left  |
               reg * {{ self.StarSemRule }} |
               ( reg ) {{self.ParSemRule }} |
               id {{ self.BaseSemRule }};
            """
where:

rulesym="->"
production symbol
rhssep=''
RHS symbols separator
opsym='//'
operator definition separator
semsym='{{'
semantic rule start marker
csemsym='}}'
semantic rule end marker
rulesep='|'
separator for multiple rules for a LHS
ruleend=';'
end marker for one LHS rule

The separators can be redefined in the tokenizer of the Yappy_grammar class. An empty rule is written with an empty RHS (nothing between the rule symbol, or the rule separator, and the rule end marker), as shown below. If no semantic rule is given, DefaultSemRule is assumed.
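
For instance, the grammar string of the ListAVG2 demo parser (listed in full in the Documentation section) uses an empty rule as the second alternative for AVL:

E -> ( AVL ) {{self.ParSemRule}} ;
AVL -> AV , AVL {{DefaultSemRule}} | ;
AV -> tok = tok {{ self.AddItem }};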

See the Yappy documentation for more details.

Semantic Actions

As usual, the semantic value of an expression will be a function of the semantic values of its parts. The semantics of a token is defined by the tokenizer (Section 2.1). The semantic actions for grammar rules are specified by Python functions that can be evaluated in a given context. Our approach is essentially borrowed from the kjParsing package [rs00]: a semantic function takes as arguments a list with the semantic values of the RightHandSide of a rule and a context, and returns a value that represents the meaning of the LeftHandSide, performing any side effects on the context.

For instance, by default the semantic value of a rule can be the semantic value of the first element of the RightHandSide:

 def DefaultSemRule(list,context=None):
    """Default  semantic rule"""
    return list[0]

Assuming the definition of some objects for regular expressions, trivial semantic rules for printing regular expressions can be:

     def OrSemRule(self,list,context):
         return "%s+%s" %(list[0],list[2])

     def ConcatSemRule(self,list,context):
         return list[0]+list[2]

     def ParSemRule(self,list,context):
         return "(%s)" %list[1]

     def BaseSemRule(self,list,context):
         return list[0]

     def StarSemRule(self,list,context):
         return list[0]+'*'
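
Since the context is passed to every semantic action, a rule may also update it. A slightly simplified version of the AddItem rule of the ListAVG demo parser (full listing in the Documentation section) stores an attribute/value pair in a dictionary used as context:

     def AddItem(self,list,context):
         context[list[0]] = list[2]
         return []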

Semantic actions can also be more Bison-like, if they are given as a string in which $n stands for the semantic value of the n-th symbol of the right-hand side (counting from 0).
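
For instance, the addition rule of the SimpleExp3 demo parser (full listing in the Documentation section) is written in this style:

E -> E + T {{ "sum([$0,$2])" }};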

Error handling

No error recovery is currently implemented. Errors are reported with rudimentary information; see the exception error classes in the Yappy documentation.
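
Until error recovery is available, a parse can be guarded with a generic handler (a sketch only, with d any Yappy parser instance; the specific exception classes are not named here):

try:
    d.input("((a+b)")
except Exception, e:
    print "parsing failed:", e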

Parser generation

Given the above information, a parser is generated by instantiating an LRparser class:

>>>from yappy.parser import *
>>>parse = LRparser(grammar,table,no_table,tabletype,operators)

Some information about the generated LR table can be retrieved by printing some attributes:

>>> print parse.cfgr
0 | ('reg', ['reg', '+', 'reg'], RegExp2.OrSemRule, ('100', 'left')) 
1 | ('reg', ['reg', 'reg'],RegExp2.ConcatSemRule, ('200', 'left')) 
2 | ('reg', ['reg', '*'],RegExp2.StarSemRule) 
3 | ('reg', ['(', 'reg', ')'], RegExp2.ParSemRule) 
4 | ('reg', ['id'], RegExp2.BaseSemRule ) 
5 | ('@S', ['reg'], DefaultSemRule) 
>>> print parse
Action table:
 
State
        +       *       (       )       id      $       #
0                       s1              s2
1                       s1              s2
2       r4      r4      r4      r4      r4      r4
3       s5      s6      s1              s2      r[]
4       s5      s6      s1      s8      s2
5                       s1              s2
6       r2      r2      r2      r2      r2      r2
7       r1      s6      r1      r1      s2      r1
8       r3      r3      r3      r3      r3      r3
9       r0      s6      r0      r0      s2      r0
 Goto table:
State
        reg     @S
0       3
1       4
3       7
4       7
5       9
7       7
9       7

If _DEBUG is set, several comments are printed during the table construction, in particular the collection of LR items.


Conflict resolution

If the grammar is ambiguous, parsing action conflicts will be generated. If the noconflicts attribute is 0, only the precedence and associativity information will be used for shift/reduce conflict resolution. But if noconflicts is 1, conflicts will be resolved in the standard manner (as in yacc-like parsers):
shift/reduce
if precedence/associativity information is available, try to use it; otherwise the conflict is resolved in favor of shift. No messages will be given if the number of conflicts of this type is exactly the value of the expect attribute. The expect attribute can be set when some conflicts are legitimate.
reduce/reduce
the rule listed first will be chosen

If any of these conflicts occurs, the resolved conflicts are listed and more information can be found in the Log attribute. The Log has the following attributes:

items
the set of LR items (self.Log.items) (not currently available)
conflicts
the shift/reduce (sr) and the reduce/reduce (rr) conflicts (self.Log.conflicts)

Currently no pretty-printing is available for these values.
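
For example, the RegExp1 demo parser, which uses an ambiguous grammar, is built with noconflicts=1 and expect=0; the resolved conflicts can then be inspected afterwards (a sketch; the exact layout of Log.conflicts is not shown here):

d = RegExp1(noconflicts=1,expect=0)
print d.Log.conflicts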

Parsing

The method parsing accepts a list of tokens and a context and returns a parsed result:
>>>parse.parsing(a.scan("(a+b)*aab*"))

The attribute output records the grammar rules that were applied for parsing the string:

>>>parse.output
[4, 4, 0, 3, 2, 4, 1, 4, 1, 4, 1, 2]
If _DEBUG is set, it is possible to see each application of a table action and the values in the stack.

Yappy

The Yappy class is a wrapper for defining a parser and for parsing. Basically it creates the lexical analyser and the parser. This class is a subclass of LRparser and can also define the directories where the parsing tables are stored:

Extra arguments
Dictionary attributes:
tmpdir
Where the parse table used by the Yappy Grammar is stored
usrdir
Where the user's parsing tables are stored
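
For example, the SimpleExp demo parser stores its parsing table under /tmp:

Yappy.__init__(self,tokenize,grammar,table,no_table,
    tmpdir='/tmp')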

It defines the following I/O functions:

input
for inputting a string to be parsed, either given as an argument or, if not given, read from stdin. If the parameter lexer=1, only lexical analysis is performed
inputfile
accepts input from a file
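
Both entry points in a sketch, with d an instance of a Yappy parser such as the ParseReg class below (the filename argument of inputfile is an assumption):

d.input("(a+b)*aab*")
d.input("(a+b)*",lexer=1)
d.inputfile("exprs.txt")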

Here is a complete parser for regular expressions:

from yappy.parser import *

class ParseReg(Yappy):
     def __init__(self,no_table=0, table='tablereg'):
        grammar ="""
        reg -> reg + reg {{ self.OrSemRule }} |
               reg reg {{ self.ConcatSemRule }} // 200 left |
               reg * {{ self.StarSemRule }} |
               ( reg ) {{self.ParSemRule}} |
               id {{ self.BaseSemRule}} | 
               @empty_set {{ self.BaseSemRule}} | 
               @epsilon {{ self.BaseSemRule}} | ;
        """
        tokenize = [
        ("\s+",""),
        ("[A-Za-z0-9]",lambda x: ("id",x)),
        ("[+]",lambda x: ("+",x),("+",100,'left')),
        ("[*]",lambda x: (x,x),("*",300,'left')),
        ("\(|\)",lambda x: (x,x)) ]
        Yappy.__init__(self,tokenize,grammar,table,no_table)

     ##Semantic rules build a parse tree...
     def OrSemRule(self,list,context):
         return "(%s+%s)" %(list[0],list[2])

     def ConcatSemRule(self,list,context):
         return "(%s%s)" %(list[0],list[1])

     def ParSemRule(self,list,context):
         return "(%s)" %list[1]

     def BaseSemRule(self,list,context):
         return list[0]

     def StarSemRule(self,list,context):
         return "(%s*)" %list[0]

An instance is used as:

>>> d = ParseReg()
>>> d.input("(a+b)*aab*")
>>> (a+b)*aab*

See the Yappy documentation for more details.


Download

The Yappy system requires Python (>= 2.2). It has only been tested on GNU/Linux systems.

It is available for download here.

Files

Here is a sample of the file tree created by a Debian package:

% dpkg -c python2.3-yappy_1.4-3_all.deb
drwxr-xr-x root/root         0 2004-12-02 18:29:45 ./
drwxr-xr-x root/root         0 2004-12-02 18:29:41 ./usr/
drwxr-xr-x root/root         0 2004-12-02 18:29:41 ./usr/share/
drwxr-xr-x root/root         0 2004-12-02 18:29:41 ./usr/share/doc/
drwxr-xr-x root/root         0 2004-12-02 18:29:43 ./usr/share/doc/python2.3-yappy/
-rw-r--r-- root/root      1122 2004-12-02 18:29:42 ./usr/share/doc/python2.3-yappy/copyright
-rw-r--r-- root/root       588 2004-12-02 18:28:57 ./usr/share/doc/python2.3-yappy/changelog.Debian.gz
drwxr-xr-x root/root         0 2004-12-02 18:29:41 ./usr/lib/
drwxr-xr-x root/root         0 2004-12-02 18:29:41 ./usr/lib/python2.3/
drwxr-xr-x root/root         0 2004-12-02 18:29:41 ./usr/lib/python2.3/site-packages/
drwxr-xr-x root/root         0 2004-12-02 18:29:41 ./usr/lib/python2.3/site-packages/yappy/
-rw-r--r-- root/root     68725 2004-12-02 18:29:41 ./usr/lib/python2.3/site-packages/yappy/parser.py
-rw-r--r-- root/root      3629 2004-12-02 18:29:41 ./usr/lib/python2.3/site-packages/yappy/osets.py
-rw-r--r-- root/root        43 2004-12-02 18:29:41 ./usr/lib/python2.3/site-packages/yappy/__init__.py




History, current and future work

This project began in early 2000 as part of the FAdo project. Although several parser generator systems are now available for Python, it seems that the ones implementing LR parsers are not very dynamic...

Some current/future work includes:

And anyone who would like to collaborate/give suggestions is obviously welcome...




What's new

2009-08-14
Last version Yappy 1.9.3
  • Detection of Python shelve db type, when reusing a parsing table
  • Exceptions updated to Python 2.6
2009-06-30
Last version Yappy 1.9.1.

2008-05-30
Corrected lexer behaviour with empty strings (bug reported by Stephen Dann).

2006-07-10

  • In grammar specifications, separation between semantic rules (inside {{ }}) and precedence information.
  • Semantic rules à la Bison with semantic arguments $n (experimental implementation...)
  • The class Yappy allows dictionary arguments that can specify the directories where parser tables are stored (suggestion due to Guillaume Pothier)
2005-07-31

  • Corrected version for Python 2.4, which does not have the pcre module.
  • Corrected handling of some exceptions in LRparser (thanks due to Ian Jackson)
2004-03-25

  • Corrected bug in Yappy_grammar semantic rules, concerning precedence values.
  • If _DEBUG is set, the collection of items used in the generation of a LR table is printed.




Documentation

The Yappy API documentation can be found here

Some simple demo parsers can be found in the file demo.py in the examples directory (for instance /usr/share/doc/python2.2/examples).
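
Each demo class bundles a test method with sample inputs, so a quick way to exercise one (assuming demo.py is importable) is:

>>> import demo
>>> d = demo.SimpleExp()
>>> d.test()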


# -*- coding: utf-8 -*-
#
# 
# This is part of Yappy
#
#
# demo.py -- some simple parsers
#
# Copyright (C) 2000-2003 Rogério Reis & Nelma Moreira {rvr,nam}@ncc.up.pt
#

#from yappy.parser import *
import sys, string
from yappy.parser import *
############## Demos  #####################

class SimpleExp(Yappy):
    """ A parser  for simple arithmetic expresssion. Allows blanks
    between tokens"""
    def __init__(self,no_table=0, table='saexp.tab'):
        grammar = grules([
                ("E -> E + T", self.Add),
                ("E ->T", DefaultSemRule),
                ("T -> T * F", self.Mul ),
                ("T -> F", DefaultSemRule),
                ("F -> ( E )", self.ParSemRule),
                ("F -> id", DefaultSemRule)]
                         )
        tokenize=[("\s+",""),
               ("\d+",lambda x: ("id",int(x))),
               ("\+",lambda x: (x,x)),
               ("\*",lambda x: (x,x)),
               ("\(|\)",lambda x: (x,x)) ]
        Yappy.__init__(self,tokenize,grammar,table,no_table,
    tmpdir='/tmp')
        
    
    def ParSemRule(self,list,context=None):
        return list[1]

    def DoPrint(self,list,context=None):
        print list[0]
        return list[0]

    def Add(self,list,context):
        print list
        return list[0] + list[2]

    def Mul(self,list,context):
        print list
        return list[0] * list[2]

    def test(self):
        st = " 2 + 24 + 34 * 2 + 1"
        print "Input: %s" %st
        print "Result:", self.input(st)   

class SimpleExp3(SimpleExp):
    """ A parser  for simple arithmetic expresssion. Allows blanks
    between tokens"""
    def __init__(self,no_table=0, table='saexp.tab'):
        grammar = """ 
                E -> E + T {{ "sum([$0,$2])"}};
                E ->T ;
                T -> T * F" {{ self.Mul }};
                T -> F ;
                F -> ( E ) {{ self.ParSemRule}};
                F -> id;
                """
        tokenize=[("\s+",""),
               ("\d+",lambda x: ("id",int(x))),
               ("\+",lambda x: (x,x)),
               ("\*",lambda x: (x,x)),
               ("\(|\)",lambda x: (x,x)) ]
        Yappy.__init__(self,tokenize,grammar,table,no_table)
        
class SimpleExp1(Yappy):
    """ A parser  for simple arithmetic expresssions, with operators """
    def __init__(self,no_table=0, table='saexp1.tab', tabletype=LALRtable,noconflicts=1,expect=0):
        grammar = grules([
                ("E -> E add_op T", self.Add),
                ("E ->T", DefaultSemRule),
                ("T -> T mul_op F", self.Mul),
                ("T -> F", DefaultSemRule),
                ("F -> ( E )", self.ParSemRule),
                ("F -> id", DefaultSemRule)])

        tokenize=[("\d+",lambda x: ("id",int(x))),
               ("[+-]",lambda x: ("add_op",self.make_op(x))),
               ("[*/]",lambda x: ("mul_op",self.make_op(x))),
               ("\(|\)",lambda x: (x,x)) ]
        Yappy.__init__(self,tokenize,grammar,table,no_table,tabletype,noconflicts,expect)

    def make_op(self,op):
        return {"+"  : operator.add,
		'-'  : operator.sub,
                '*'   : operator.mul,
                '/'   : operator.div,
                '%'   : operator.mod
                }[op]
    
    def ParSemRule(self,list,context=None):
        return list[1]

    def DoPrint(self,list,context=None):
        print list[0]
        return list[0]

    def Add(self,list,context):
        print list
        return apply(list[1],[list[0],list[2]])

    def Mul(self,list,context):
        print list
        return apply(list[1],[list[0],list[2]])

    def test(self):
        st = "2-24*9"
        st1 = "2-24*9-34*2+1"
        print "Input: %s" %st
        print "Result:", self.input(st)


class SimpleExp2(SimpleExp1):
    """ A parser  for simple arithmetic expresssions with prec and associativity"""
    def __init__(self,no_table=0, table='saexp2.tab',
    tabletype=LALRtable,noconflicts=1,expect=0):
        self.line = 0
        grammar = grules([
                ("E -> E add_op T", self.Add),
                ("E ->T", DefaultSemRule),
                ("T -> T mul_op F", self.Mul),
                ("T -> F", DefaultSemRule),
                ("F -> ( E )", self.ParSemRule),
                ("F -> id", DefaultSemRule)])

        tokenize=[("\d+",lambda x: ("id",int(x))),
                  ("\n+",lambda x: (x,self.countline())),
               ("[+-]",lambda x: ("add_op",self.make_op(x)),("add_op",100,'left')),
               ("[*/]",lambda x: ("mul_op",self.make_op(x)),("mul_op",200,'left')),
               ("\(|\)",lambda x: (x,x)) ]
        Yappy.__init__(self,tokenize,grammar,table,no_table,tabletype,noconflicts,expect)
        

    def countline(self):
        self.line+=1
        return ""
    
    def make_op(self,op):
        """ """
        return {"+"  : operator.add,
		'-'  : operator.sub,
                '*'   : operator.mul,
                '/'   : operator.div,
                '%'   : operator.mod
                }[op]
    
    def ParSemRule(self,list,context=None):
        return list[1]

    def DoPrint(self,list,context=None):
        print list[0]
        return list[0]

    def Add(self,list,context):
        print list
        return apply(list[1],[list[0],list[2]])

    def Mul(self,list,context):
        print list
        return apply(list[1],[list[0],list[2]])

class SimpleExpAmb(SimpleExp2):
    """A parser  for simple arithmetic expresssions with an ambiguous grammar """
    def __init__(self,no_table=0, table='expamb.tab',tabletype=LALRtable,noconflicts=1,expect=0):
        grammar = grules([
                ("E -> E add_op E", self.Add),
                ("E -> E mul_op E", self.Mul),
                ("E -> ( E )", self.ParSemRule),
                ("E -> id", DefaultSemRule)])

        sinal = "[+-]"
        integer ="\d" 
        tokenize=[("(%s)+"%integer,lambda x: ("id",int(x))),
               (sinal,lambda x: ("add_op",self.make_op(x)),("add_op",100,'left')),
               ("[*/]",lambda x: ("mul_op",self.make_op(x)),("mul_op",200,'left')),
               ("\(|\)",lambda x: (x,x)) ]

        Yappy.__init__(self,tokenize,grammar,table,no_table,tabletype,noconflicts,expect)


class SimpleExpAmb2(SimpleExp2):
    """A parser  for simple arithmetic expresssions with an ambiguous
    grammar, and context-dependent precedence """
    def __init__(self,no_table=0, table='expamb.tab',tabletype=LALRtable,noconflicts=1,expect=0):
        grammar = grules([
                ("E -> E add_op E", self.Add),
                ("E -> E mul_op E", self.Mul),
                ("E -> n_op E", lambda l,c: -1*l[1]),
                ("E -> ( E )", self.ParSemRule),
                
                ("E -> id", DefaultSemRule)])

        plus = "[+-]"
        integer = "\d" 
        tokenize=[("(%s)+"%integer,lambda x: ("id",int(x))),
               ("%s"%plus,lambda x:
                ("add_op",self.make_op(x)),("add_op",100,'left')),
                ("~",lambda x: ("n_op",self.make_op('-')),("n_op",300,'left')),
               ("[*/]",lambda x: ("mul_op",self.make_op(x)),("mul_op",200,'left')),
               ("\(|\)",lambda x: (x,x)) ]

        Yappy.__init__(self,tokenize,grammar,table,no_table,tabletype,noconflicts,expect)
    def test(self):
        st=[
         "~2",
        "2-24*9",
        "2-24*9-34*2+1",
        "~2-24*9-34*2+1",
         "2+3+(~5*(2+3)*2)-24*9-34*2+1"
         ]
        for i in st:
            print "Input: %s" %i
            print "Result:", self.input(i)


class ListAVG(Yappy):
    """A parser  for transforming a list atrib=value into a python dictionary """
    def __init__(self,no_table=0, table='Listavg.tab'):
        grammar =   """
            E -> ( ) {{self.EmptyDict}};
            E ->  ( AVL ) {{self.ParSemRule}} ;
            AVL ->   AV , AVL  | AV {{EmptySemRule}} ;
            AV -> tok = tok {{ self.AddItem }};
            """
    
        tokenize = [
             ("\s+",""),
             ("[A-Za-z0-9]+",lambda x: ("tok",x)),
             ("\=",lambda x: (x,x)),
             (",",lambda x: (x,x)),
             ("\(|\)",lambda x: (x,x)) ]

        Yappy.__init__(self,tokenize,grammar,table,no_table)


    def ParSemRule(self,list,context=None):
        return list[1]

    def DoPrint(self,list,context=None):
        print list[0]
        return list[0]


    def EmptyDict(self,list,context):
        return []

    def AddItem(self,list,context):
        if not isinstance(list[0], StringType):
            raise NameError, "Key %s must be a string" % list[0] 
        context[list[0]] = list[2] 
        return []

    def test(self):
        st = "(a=5,b=6,c=7)"
        print "Input: %s" %st      
        self.input(st,context={})
        print self.context

class ListAVG1(ListAVG):
    """A parser  for transforming a list atrib=value into a python dictionary """
    def __init__(self,no_table=0, table='Listavg1.tab'):
        grammar =   """
            E -> ( ) {{self.EmptyDict}};
            E -> ( AVL ) {{self.ParSemRule}} ;
            AVL ->   AV , AVL   | AV {{EmptySemRule}} ;
            AV -> tok = tok {{ self.AddItem }};
            """
        tokenize = [
             ("\s+",""),
             ("[A-Za-z0-9]+",lambda x: ("tok",x)),
             ("\=",lambda x: (x,x)),
             (",",lambda x: (x,x)),
             ("\(|\)",lambda x: (x,x)) ]

        Yappy.__init__(self,tokenize,grammar,table,no_table)
        
class ListAVG2(ListAVG):
    """A parser  for transforming a list atrib=value into a python dictionary """
    def __init__(self,no_table=0, table='Listavg1.tab'):
        grammar =   """
            E -> ( AVL ) {{self.ParSemRule}} ;
            AVL ->   AV , AVL  {{DefaultSemRule}} | ;
            AV -> tok = tok {{ self.AddItem }};
            """
        tokenize = [
             ("\s+",""),
             ("[A-Za-z0-9]+",lambda x: ("tok",x)),
             ("\=",lambda x: (x,x)),
             (",",lambda x: (x,x)),
             ("\(|\)",lambda x: (x,x)) ]

        Yappy.__init__(self,tokenize,grammar,table,no_table)


    def test(self):
        st = "(a=5,b=6,c=7,)"
        
        print "Input: %s" %st      
        self.input(st,context={})
        print self.context

        
class RegExp(Yappy):
     def __init__(self,no_table=0, table='regamb.tab',
     tabletype=LALRtable,
                  noconflicts=1,expect=0):
        """ A parser for regular expressions with operators. Semantic
     rules are dummy..."""
        grammar = grules([("r -> r | r",self.OrSemRule),
                        ("r -> r . r",self.ConcatSemRule),
                    ("r -> r *",self.StarSemRule, (300,'left')),
                    ("r -> ( r )",self.ParSemRule),
                    ("r -> id",self.BaseSemRule),
                   ])
        tokenize =[
                    ("[A-Za-z0-9]",lambda x: ("id",x)),
                    ("[+|]",lambda x: ("|",x),("|",100,'left')),
                    ("[\.]",lambda x: (".",""),(".",200,'left')),
                    ("[*]",lambda x: (x,x), ("*",300,'left')),
                    ("\(|\)",lambda x: (x,x)) ]
        Yappy.__init__(self,tokenize,grammar,table,no_table,tabletype)

     ##Semantic rules build a parse tree...
     def OrSemRule(self,list,context):
         return "(%s+%s)" %(list[0],list[2])

     def ConcatSemRule(self,list,context):
         return "(%s%s)" %(list[0],list[2])

     def ParSemRule(self,list,context):
         return "(%s)" %list[1]

     def BaseSemRule(self,list,context):
         return list[0]

     def StarSemRule(self,list,context):
         return "(%s*)" %list[0]

     def test(self):
         st  = ["(a+b)*.a.a.b*",
                "a+a.b+a.b.(a+a)*",
                "a+a.b+a.(a+a)**",
                 "a+a.b.c",
                "a+a.b.(c+b)",
                "a+a.b.(c+b)*",
                 "a+a.b*.(a+b)"]
         for i in st:
             print "Input: %s" %i
             print self.input(i)

class RegExp1(RegExp):
     def __init__(self,no_table=0, table='tableambreg1',tabletype=LALRtable,
                  noconflicts=1,expect=0):
        """A parser for regular expressions with ambiguous rules  """
        grammar = grules([("reg -> reg + reg",self.OrSemRule),
                                ("reg -> reg reg",self.ConcatSemRule,(200,'left')),
                               ("reg -> reg *",self.StarSemRule),
                               ("reg -> ( reg )",self.ParSemRule),
                               ("reg -> id",self.BaseSemRule)
                   ])
        tokenize =[
                    ("[A-Za-z0-9]",lambda x: ("id",x)),
                    ("[+|]",lambda x: ("+",x),("+",100,'left')),
                    ("[*]",lambda x: (x,x)),
                    ("\(|\)",lambda x: (x,x)) ]
        Yappy.__init__(self,tokenize,grammar,table,no_table,tabletype,noconflicts,expect)
        
     def ConcatSemRule(self,list,context=None):
         return "(%s%s)" %(list[0],list[1])


     def test(self):
         st  = ["(a+b)*aab*",
                "(a+ab)*a*",
                "(a+a)a+ab",
                "a+ab+(a(a+a)*)*",
                "a+ab+a(a+a)**",
                "(a+a)**ab(a+b)**",
                "aa+bb**",
                "(a+ab)(a+ab)(ac+a)",
                "a+abc+ad",
                "abc+b+ad",
                "a+ab",
                "a+b+ab+cccaaaaaa",
                "a+ab(a+a)",
                "ab+ab(a+a)a*",
                "a+ab*",
                "(a+ab*(a+b))*",
                "a+ab*(a+b)",
                "a+c+ab(a+b)",
                "a+c+(a+b)ab",
                "a+b*",
                "aa+b*",
                "aab*ab+a*+aa",
                "aab*ab**+(a+aa)**"
                ]
         for i in st:
             print "Input: %s" %i
             print self.input(i)
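
# Usage sketch for RegExp1 (hypothetical session). Concatenation is plain
# juxtaposition here, disambiguated by the (200,'left') precedence attached
# to the "reg -> reg reg" rule itself, so "a+ab" should come back as
# '(a+(ab))':
#   RegExp1().input("a+ab")   # -> '(a+(ab))'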


class RegExp2(RegExp1):
     def __init__(self,no_table=0, table='tableambreg2'):
          grammar = """
          reg -> reg + reg {{ self.OrSemRule }} |
                 reg reg {{ self.ConcatSemRule }} // 200 left |
                 reg * {{ self.StarSemRule }} |
                 ( reg ) {{ self.ParSemRule }} |
                 id {{ self.BaseSemRule }} ;
          """
          tokenize = [("@epsilon", lambda x: ("id",x)),
                      ("@empty_set", lambda x: ("id",x)),
                      ("[A-Za-z0-9]", lambda x: ("id",x)),
                      ("[+|]", lambda x: ("+",x), ("+",100,'left')),
                      ("[*]", lambda x: (x,x)),
                      ("\(|\)", lambda x: (x,x))]

         Yappy.__init__(self,tokenize,grammar,table,no_table)
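
# Note on the grammar string above: "{{ ... }}" encloses the semantic action
# for an alternative and "// 200 left" attaches (precedence, associativity)
# to that alternative; "@epsilon" and "@empty_set" are just extra tokens
# mapped to id. A hypothetical session:
#   RegExp2().input("a+b*")   # -> '(a+(b*))'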

class RegExp3(RegExp):
     def __init__(self,no_table=0, table='tableambreg3'):
        """A erronous parser for regular expressions with ambiguous rules and
     no precedence information """
        grammar = grules([("reg -> reg | reg",self.OrSemRule),
                                ("reg -> reg reg",self.ConcatSemRule),
                               ("reg -> reg *",self.StarSemRule),
                               ("reg -> ( reg )",self.ParSemRule),
                               ("reg -> id",self.BaseSemRule),
                   ])
        tokenize =[
                    ("[A-Za-z0-9]",lambda x: ("id",x)),
                    ("[+|]",lambda x: ("|",x)),
                    ("[*]",lambda x: (x,x)),
                    ("\(|\)",lambda x: (x,x)) ]
        Yappy.__init__(self,tokenize,grammar,table,no_table,LALRtable,1)
        
     def ConcatSemRule(self,list,context=None):
         return list[0]+list[1]

     def test(self):
         st  = "(a+b)*aab*"
         print "Input: %s" %st
         print self.input(st)
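
# Without any precedence/associativity information this grammar has many
# shift/reduce conflicts; since noconflicts=1 they are resolved silently in
# favor of shift and logged in self.Log.conflicts, so some inputs parse with
# an unintended structure -- hence "erroneous".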
        
def Sum(a,b):
    return a+b

def curry(f,*a,**kw):
    def curried(*more_a,**more_kw):
        return f(*(a+more_a),**dict(kw,**more_kw))
    return curried
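
# curry partially applies a function, e.g. (a hypothetical session):
#   add2 = curry(Sum, 2)
#   add2(3)   # -> 5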

if __name__ == '__main__':
    d = SimpleExpAmb()
    st = "2-24*9"
    print "Input:", st
    print "Result:", d.input(st)
    st = "2-24*9-34*2+1"
    print "Input:", st
    print "Result:", d.input(st)
    d = RegExp2()
    print "Result:", d.input("a+b*")



yappy-1.9.4/html/node7.html:

Related projects

Yappy is part of the FAdo project: http://www.ncc.up.pt/FAdo.



yappy-1.9.4/html/node8.html:

Bibliography

ASU86
Alfred V. Aho, Ravi Sethi, and Jeffrey D. Ullman.
Compilers: Principles, Techniques and Tools.
Addison Wesley, 1986.

GJ90
Dick Grune and Ceriel J.H. Jacobs.
Parsing Techniques - A Practical Guide.
Prentice Hall, 1990.
Available for downloading in PostScript format.

HMU00
John E. Hopcroft, Rajeev Motwani, and Jeffrey D. Ullman.
Introduction to Automata Theory, Languages and Computation.
Addison Wesley, 2nd edition, 2000.

Lut96
M. Lutz.
Programming Python.
O'Reilly & Associates, 1996.

rs00
Aaron Watters.
Parse Generation in Python.
New Jersey Institute of Technology, 2000.


yappy-1.9.4/html/yappyDoc.html:

Yappy
Yet another LR(1) parser generator for Python

Rogério Reis, Nelma Moreira

DCC-FC & LIACC, Universidade do Porto, Portugal

2000-2006







Nelma Moreira, Rogério Reis 2010-06-10
yappy-1.9.4/setup.py:
from distutils.core import setup

if __name__ == '__main__' :
    setup(name="yappy",
          packages = ['yappy'],
          version="1.9",
          description="Yet another parser generator for Python",
          author="Rogerio Reis and Nelma Moreira",
          author_email="{rvr,nam}@ncc.up.pt",
          url="http://www.ncc.up.pt/fado/Yappy",
          maintainer="Rogerio Reis",
          maintainer_email="rvr@ncc.up.pt")

yappy-1.9.4/yappy/__init__.py:
version="1.0"
__all__= ["parser","osets"]

yappy-1.9.4/yappy/osets.py:
# -*- coding: UTF-8 -*-
"""
This is part of Yappy

osets.py -- a Set private implementation

Copyright (C) 2000-2003 Rogério Reis & Nelma Moreira {rvr,nam}@ncc.up.pt

Version: $Id: osets.py,v 1.3 2004/02/18 10:54:48 rvr Exp $

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

@author: Rogério Reis & Nelma Moreira {rvr,nam}@ncc.up.pt
"""

class Set(object):
    """ Sets: that is lists without order or repetition. May be used
    everywhere lists are used... because they rely on them."""
    def __init__(self,list=[]):
        foo = []
        for m in list:
            if not m in foo:
                foo.append(m)
        self.members = foo

    def __getitem__(self, index):
        return self.members[index]

    def __setitem__(self,index,value):
        self.members[index] = value

    def __getattr__(self,name):
        return getattr(self.members, name)

    def __add__(self, other):
        new = Set(self.members[:])
        for v in other:
            if not v in new:
                new.append(v)
        return new

    def __iadd__(self, other):
        return self + other

    def __radd__(self,other):
        return self + other

    def __sub__(self, other):
        new = Set(self.members[:])
        for v in other:
            try:
                del(new.members[new.index(v)])
            except ValueError:
                continue
        return new

    def __cmp__(self,other):
        if len(self) == len(other):
            if not len(self - other):
                return(0)
        return(1)

    def __len__(self):
        return len(self.members)

    def __str__(self):
        return str(self.members)

    def __repr__(self):
        return "Set %s"%str(self.members)

    def __getslice__(self,low,high):
        return Set(self.members[low:high])

    def __delslice__(self,low,high):
        for i in range(low,max(high+1,len(self.members)-1)):
            del self.members[i]

    def __delitem__(self,key):
        del self.members[key]

    def append(self,member):
        if not member in self.members:
            self.members.append(member)

    def s_append(self,member):
        e = 0
        if not member in self.members:
            self.members.append(member)
            e = 1
        return e

    def empty(self):
        return len(self.members) == 0

    def s_extend(self,other):
        e = 0
        for v in other:
            if not v in self:
                self.members.append(v)
                e = 1
        return e

    def sort(self):
        self.members.sort()

    def index(self, index):
        return self.members.index(index)

    def remove(self,v):
        try:
            del(self.members[self.index(v)])
        except ValueError:
            pass

    def copy(self):
        return Set(self.members[:])

    def first(self):
        return self.members[0]

    # duplicates a set (shallow copy)
    def dup(self):
        new = Set()
        new.members = self.members[:]
        return new
yappy-1.9.4/yappy/parser.py:
# -*- coding: utf-8 -*-
"""
This is part of Yappy

parser.py -- Yet another parser for python...

A LR parser generator, based on Aho and al. 1986, C{Compilers}
(aho86:_compil). It currently builds C{SLR}, C{LR(1)} and C{LALR(1)}
parsing tables.

Copyright (C) 2000-2003 Rogério Reis & Nelma Moreira {rvr,nam}@ncc.up.pt

Version: $Id: parser.py,v 1.18 2006-07-19 09:52:06 rvr Exp $

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.

@author: Rogério Reis & Nelma Moreira {rvr,nam}@ncc.up.pt

@var _DEBUG: if nonzero, display information during parser generation
or parsing.
@type _DEBUG: integer """ from types import * import re, string import sys, string, copy, time, operator import os, os.path import shelve, anydbm, whichdb # set elements are mutable objects; we cannot use sets import osets #Globals _DEBUG=0 _Version = "1.9.4" NIL = "" class Lexer: """Class for lexical analyser to use with the parser @ivar rules: lexical rules @ivar operators: precedence and associativity for operators @type operators: dictionary """ def __init__(self,rules_list): """ By now lexer is kept as simple as possible, so order is really essential: i.e. if a keyword is substring of another its rule must appear after the larger keyword for the obvious reasons... @param rules_list: contains pairs C{(re,funct,op?)} where: C{re}: is an uncompiled python regular expression C{funct}: the name of a funcion that returns the pair C{(TOKEN, SPECIAL_VALUE)}, where C{TOKEN} is the token to be used by the parser and C{SPECIAL_VALUE} an eventual associated value. The argument is the matched string. If C{funct} equals C{""} the token is ignored. This can be used for delimiters. C{op}: if present, is a tuple with operador information: C{(TOKEN,PRECEDENCE,ASSOC)} where C{PRECEDENCE} is an integer and C{ASSOC} the string 'left' or 'right'. """ self.rules = [] rnumber = 1 for r in rules_list: try: rex = r[0] funct = r[1] except IndexError: raise LexicalError(rnumber,r) try: rec = re.compile(rex) except TypeError: raise LexicalRulesErrorRE(rex,rnumber) try: op,prec,assoc = r[2] if not self.__dict__.has_key("operators"): self.operators = {} if not self.operators.has_key(op): self.operators[op] = (prec,assoc) except IndexError: pass self.rules.append((rec,funct)) rnumber = rnumber + 1 if _DEBUG and self.__dict__.has_key("operators"): print "operators %s" %self.operators def scan(self,string): """Performs the lexical analysis on C{string} @return: a list of tokens (pairs C{(TOKEN , SPEcial_VALUE )}), for recognized elements and C{("@UNK", string )} for the others""" st = [string] for r in self.rules: st = self.scanOneRule(r,st) return self.scanUnknown(st) def scanOneRule(self,rule,st): """Scans space C{st} according only one rule @param rule: one rule C{(re,fun,op)} @param st: is a list of strings and already matched structures """ re = rule[0] fun = rule[1] st1 = [] for s in st: if not isinstance(s, StringType): st1.append(s) else: s1 = s while True: m = re.search(s1) if not m: st1.append(s1) break else: if m.start() != 0: st1.append(s1[0:m.start()]) # if fun == "": # st1.append(("",s1[m.start():m.end()])) # else: if fun != "": st1.append(apply(fun,[s1[m.start():m.end()]])) if m.end() == len(s1): break else: s1 = s1[m.end():] return st1 def scanUnknown(self,st): """Scans the resulting structure making Unknown strings Unknown parts will be of the form ("@UNK", string ) """ st1 = [] for s in st: if isinstance(s, StringType): st1.append(("@UNK",s)) else: st1.append(s) return st1 def readscan(self): """Scans a string read from stdin """ st = raw_input() if not st: raise IOError if isinstance(st, StringType): s = self.scan(st) return s class YappyError(Exception): """Class for all Yappy exceptions""" pass class StackUnderflow(YappyError): pass class LexicalError(YappyError): """Class for all Yappy Lexical analyser exceptions""" def __init__(self,r,rule): self.value = 'Error in rule number %s: %s'%(r,rule) def __str__(self): return "%s" % (self.value) class LexicalRulesErrorRE(YappyError): """An error occured parsing the RE part of a lexical rule""" def __init__(self,re,no=0): self.value = 'Error in RE "%s" 
at rule n.%d'%(re,no) self.rule = no self.re = re def __str__(self): return "%s" % (self.value) class GrammarError(YappyError): """Class for input grammar errors """ def __init__(self,rule): self.value = 'Error in rule "%s" '%rule def __str__(self): return "%s" % (self.value) class SLRConflictError(YappyError): """Confliting actions in building SLR parsing table. Grammar is not SLR(0)""" def __init__(self,i,a): self.value = 'Confliting action[%d,%s] in SLR parsing table ' %(i,a) self.item = i self.symbol = a def __str__(self): return "%s" % (self.value) class LRConflictError(YappyError): """Conflicting actions in building LR parsing table. Grammar is not LR(1)""" def __init__(self,i,a): self.item = i self.symbol = a self.value = 'Confliting action[%d,%s] in LR(1) parsing table ' %(self.item,self.symbol) def __str__(self): return "%s" % (self.value) class LRConflicts(YappyError): """Confliting actions in building LR parsing table. Grammar is not LR(1)""" def __init__(self): self.value = """Warning>>> Several confliting actions. Please consult self.Log for details""" def __str__(self): return "%s" % (self.value) class LRParserError(YappyError): """An error occured in LR parsing program""" def __init__(self,s,a): self.item = s self.symbol = a self.value = 'Error in LR: (%s,%s) not found' %(self.item,self.symbol) def __str__(self): return "%s" %(self.value) class SemanticError(YappyError): """An error occured in the application of a semantic action""" def __init__(self,m,n=0,r=None): self.value = m self.nrule = n self.rule = r def __str__(self): return "%s in semantic rule %d: %s" % (self.value,self.nrule,self.rule) class TableError(YappyError): """Mismatch table version """ def __init__(self,t): self.value = """A new table must be built. Please remove table shelve %s or set no_table to 0""" %t def __str__(self): return "%s" % (self.value) class CFGrammar: """ Class for context-free grammars @ivar rules: grammar rules @ivar terminals: terminals symbols @ivar nonterminals: nonterminals symbols @ivar start: start symbol @type start: string @ivar ntr: dictionary of rules for each nonterminal """ def __init__(self,grammar): """ @param grammar: is a list for productions; each production is a tuple C{(LeftHandside,RightHandside,SemFunc,Prec?)} with C{LeftHandside} nonterminal, C{RightHandside} list of symbols, C{SemFunc} syntax-direct semantics, if present C{Prec (PRECEDENCE,ASSOC)} for ambiguous rules First production is for start symbol Special symbols: C{@S}, C{$}, C{#} """ """ MUST BE IN THIS ORDER""" self.rules = grammar self.makenonterminals() self.maketerminals() self.start = self.rules[0][0] self.aug_start = "@S" self.rules.append((self.aug_start,[self.start],DefaultSemRule)) self.endmark = '$' self.dummy = '#' self.terminals.append(self.endmark) self.terminals.append(self.dummy) self.nonterminals.append(self.aug_start) """ ritems are only for control ... 
not needed """ self.ritems = [] """ ntr[A] is the set of rules which has A as left side""" self.ntr = {} i = 0 for r in self.rules: if not self.ntr.has_key(r[0]): self.ntr[r[0]] = [i] else: self.ntr[r[0]].append(i) for j in range(len(r[1]) + 1): self.ritems.append((i,j)) i = i + 1 def __str__(self): """Grammar rules @return: a string representing the grammar rules """ s = "" for n in range(len(self.rules)): lhs = self.rules[n][0] rhs = self.rules[n][1] s = s + "%s | %s -> %s \n" %(n, lhs, string.join(rhs," ")) return "Grammar Rules:\n\n%s" % s def makeFFN(self): self.NULLABLE() self.FIRST_ONE() self.FOLLOW() def maketerminals(self): """Extracts C{terminals} from the rules. C{nonterminals} must already exist""" self.terminals = [] for r in self.rules: for s in r[1]: if s not in self.nonterminals and s not in self.terminals: self.terminals.append(s) def makenonterminals(self): """Extracts C{nonterminals} from grammar rules.""" self.nonterminals = [] for r in self.rules: if r[0] not in self.nonterminals: self.nonterminals.append(r[0]) def NULLABLE(self): """Determines which nonterminals C{X ->* []} """ self.nullable = {} for s in self.terminals: self.nullable[s] = 0 for s in self.nonterminals: self.nullable[s] = 0 if self.ntr.has_key(s): for i in self.ntr[s]: if not self.rules[i][1]: self.nullable[s] = 1 break k = 1 while k == 1: k = 0 for r in self.rules: e = 0 for i in r[1]: if not self.nullable[i]: e = 1 break if e == 0 and not self.nullable[r[0]]: self.nullable[r[0]] = 1 k = 1 def FIRST(self,s): """C{FIRST(s)} is the set of terminals that begin the strings derived from s """ first = osets.Set([]) e = 0 for i in range(len(s)): first.s_extend(self.first[s[i]]) if not self.nullable[s[i]]: e = 1 break if e == 0: self.nullable[string.join(s)] = 1 else: self.nullable[string.join(s)] = 0 return first def FIRST_ONE(self): """Determines C{FIRST(s)}, for every symbol s, that is the set of terminals that begin the strings derived from s """ self.first = {} self.nd = {} self.ms =Stack() for s in self.terminals: self.first[s] = osets.Set([s]) for s in self.nonterminals: if self.ntr.has_key(s) and not self.first.has_key(s): # self.FIRST_NT(s) self.FIRST_TRA(s,1) def FIRST_TRA(self,s,d): """Transitiv closure of C{FIRST(X)} """ self.ms.push(s) self.nd[s] = d """ calculating F1(s)""" self.first[s] = osets.Set([]) for i in self.ntr[s]: for y in self.rules[i][1]: if self.nullable[y]: continue else: if y in self.terminals: self.first[s].append(y) break """transitive closure""" for i in self.ntr[s]: for y in self.rules[i][1]: if y in self.nonterminals: if not self.first.has_key(y): self.FIRST_TRA(y,d+1) if self.nd.has_key(y) and self.nd[y] != -1: self.nd[s] = min(self.nd[s],self.nd[y]) self.first[s].s_extend(self.first[y]) if self.nullable[y]: continue else: break else: break if self.nd[s] == d: while 1: y = self.ms.pop() if y == s: break self.first[y] = self.first[s].copy() self.nd[y] = -1 def FIRST_NT(self,s): """ Recursivelly computes C{FIRST(X)} for a nonterminal X""" if not self.ntr.has_key(s): return self.first[s] = osets.Set([]) for i in self.ntr[s]: r = self.rules[i][1] if r == []: self.nullable[s] = 1 else: e = 1 for y in r: if not self.first.has_key(y): self.FIRST_NT(y) self.first[s].s_extend(self.first[y]) if not self.nullable[y]: e = 0 break if e == 1: self.nullable[s] = 1 def FOLLOW(self): """computes C{FOLLOW(A)} for all nonterminals: the set of terminals a that can appear immediately to the right of A in some sentential form.""" self.follow = {} self.follow[self.start] = 
osets.Set([self.endmark]) for rule in self.rules: r = rule[1] for i in range(len(r)): if r[i] in self.nonterminals: if not self.follow.has_key(r[i]): self.follow[r[i]] = osets.Set([]) j = i + 1 self.follow[r[i]].s_extend(self.FIRST(r[j:])) e = 1 while e: e = 0 for s in self.nonterminals: for i in self.ntr[s]: r = self.rules[i][1] try: b = r[len(r)-1] if b in self.nonterminals and self.follow[b].s_extend(self.follow[s]): e = 1 except IndexError: pass except KeyError: pass for k in range(len(r)-1): j = k + 1 if r[k] in self.nonterminals and self.nullable[string.join(r[j:])]: if self.follow[r[k]].s_extend(self.follow[s]): e = 1 break def TransClose(self): """For each nonterminal C{s} determines the set of nonterminals a such that C{s ->* ar}, for some C{r}""" self.close_nt = {} self.nd = {} self.ms =Stack() for s in self.nonterminals: if self.ntr.has_key(s) and not self.close_nt.has_key(s): self.TRAVERSE(s,1) def TRAVERSE(self,s,d): """ """ self.ms.push(s) self.nd[s] = d """ calculating F1(s)""" self.close_nt[s] = {s:osets.Set([[]])} for i in self.ntr[s]: if not self.rules[i][1]: continue else: r = self.rules[i][1] for j in range(len(r)): if r[j+1:]: f = self.FIRST(r[j+1:]) ns = self.nullable[string.join(r[j+1:])] else: f = [] ns = 1 if r[j] in self.nonterminals: if not self.close_nt[s].has_key(r[j]): self.close_nt[s][r[j]] = osets.Set([[]]) if r[j+1:]: self.close_nt[s][r[j]].append((f,ns)) if not self.nullable[r[j]]: break else: break """reflexive tansitive closure""" for i in self.ntr[s]: if not self.rules[i][1]: continue else: r = self.rules[i][1] for j in range(len(r)): f = self.FIRST(r[j+1:]) ns = self.nullable[string.join(r[j+1:])] if r[j] in self.nonterminals: if not self.close_nt.has_key(r[j]): self.TRAVERSE(r[j],d+1) if self.nd.has_key(r[j]) and self.nd[r[j]] != -1: self.nd[s] = min(self.nd[s],self.nd[r[j]]) for k in self.close_nt[r[j]].keys(): if not self.close_nt[s].has_key(k): self.close_nt[s][k] = osets.Set([[]]) else: for v in self.close_nt[s][k]: if not v: self.close_nt[s][k].append((f,ns)) else: p, n = v if n: self.close_nt[s][k].append((p+f,ns)) else: self.close_nt[s][k].append((p,n)) if not self.nullable[r[j]]: break else: break if self.nd[s] == d: while 1: y = self.ms.pop() if y == s: break self.close_nt[y] = self.close_nt[s].copy() self.nd[y] = -1 def DERIVE_NT(self): """For each nonterminal C{s} determines the set of nonterminals a such that C{s ->* ar}, for some C{r}""" self.derive_nt = {} for s in self.nonterminals: if self.ntr.has_key(s) and not self.derive_nt.has_key(s): self.DERIVE_ONE_NT(s) def DERIVE_ONE_NT(self,s): """For nonterminal C{s} determines the set of nonterminals a such that C{s -> ar}, for some C{r} """ if not self.ntr.has_key(s): return self.derive_nt[s] = {s:osets.Set([None])} for i in self.ntr[s]: if not self.rules[i][1]: continue else: r = self.rules[i][1] for j in range(len(r)): if r[j] in self.nonterminals: if not self.derive_nt.has_key(r[j]): self.DERIVE_ONE_NT(r[j]) for k in self.derive_nt[r[j]].keys(): if not self.derive_nt[s].has_key(k): self.derive_nt[s][k] = osets.Set([]) for p in self.derive_nt[r[j]][k]: if not p : self.derive_nt[s][k].append(r[j+1:]) else: self.derive_nt[s][k].append(r[j+1:].append(p)) if not self.nullable[r[j]]: break else: break def DERIVE_T(self): """ """ self.derive_ter = {} for s in self.terminals: self.derive_ter[s] = osets.Set([s]) e = 1 while e: e = 0 for s in self.nonterminals: for i in self.ntr[s]: r = self.rules[i][1] if r == []: continue for i in range(len(r)): if r[i] in self.terminals: if i < len(r) -1: if 
self.derive_ter.has_key(r[i+1]): if not self.derive_ter.has_key(s): self.derive_ter[s] = osets.Set([]) if self.derive_ter[s].s_append(r[i]): e = 1 break else: if not self.derive_ter.has_key(s): self.derive_ter[s] = osets.Set([]) if self.derive_ter[s].s_append(r[i]): e = 1 break else: """ non-terminal""" if self.derive_ter.has_key(r[i]): if not self.derive_ter.has_key(s): self.derive_ter[s] = osets.Set([]) if self.derive_ter[s].s_extend(self.derive_ter[r[i]]) == 1: e = 1 if i > 0 and self.nullable[r[i]]: continue else: break class LRtable: """Class for construction of a C{LR} table @ivar gr: a context-free grammar @ivar operators: operators @ivar Log: Log report for LR table construction """ def __init__(self,cfgr,operators=None,noconflicts=1,expect=0): """ @param cfgr: a context-free grammar @param operators: operators @param noconflicts: if 0 LRtable conflicts are not resolved, unless for spcecial operator rules @type noconflicts: integer @param expect: exact number of expected LR shift/reduce conflicts @type expect: integer """ self.gr = cfgr self.gr.makeFFN() self.operators = operators self.precedence = None # if self.operators: self.rules_precedence() self.Log=LogLR(noconflicts,expect) self.make_action_goto() def make_action_goto(self): """ make C{action[i,X]} and C{goto[i,X]} All pairs C{(i,s)} not in action and goto dictionaries are 'error' """ c = self.items() if _DEBUG: print self.print_items(c) self.ACTION = {} self.GOTO = {} #shelve not working with osets #self.Log.items = c for i in range(len(c)): for item in c[i]: a = self.NextToDot(item) if a in self.gr.terminals: state = self.goto(c[i],a) try: j = c.index(state) self.add_action(i,a,'shift',j) except IndexError: if _DEBUG: print "no state" elif a == "": """ Dot at right end """ l = self.gr.rules[item[0]][0] if l != self.gr.aug_start : self.dotatend(item,i) else: """ last rule """ self.add_action(i,self.gr.endmark,'accept',[]) for s in self.gr.nonterminals: state = self.goto(c[i],s) try: j = c.index(state) self.GOTO[(i,s)] = j except ValueError: pass def rules_precedence(self): """Rule precedence obtained as the precedence of the right most terminal. """ self.precedence={} for i in range(len(self.gr.rules)): if len(self.gr.rules[i]) == 4: self.precedence[i] = self.gr.rules[i][3] else: self.precedence[i] = None if self.operators: self.gr.rules[i][1].reverse() for s in self.gr.rules[i][1]: if self.operators.has_key(s): self.precedence[i] = self.operators[s] break self.gr.rules[i][1].reverse() if _DEBUG: print "Precedence %s" %self.precedence def add_action(self,i,a,action,j): """Set C{(action,j)} for state C{i} and symbol C{a} or raise conflict error. 
Conficts are resolved using the following rules: - shift/reduce: if precedence/assoc information is available try to use it; otherwise conflict is resolved in favor of shift - reduce/reduce: choosing the production rule listed first """ if self.ACTION.has_key((i,a)) and self.ACTION[(i,a)] != (action,j): action1 , j1 = self.ACTION[(i,a)] if _DEBUG: print "LRconflit %s %s %s %s %s %s" %(action,j,action1,j1, i,a) if action1 == 'shift' and action == 'reduce': self.resolve_shift_reduce(i,a,j1,j) elif action == 'shift' and action1 == 'reduce': self.resolve_shift_reduce(i,a,j,j1) elif action == 'reduce' and action1 == 'reduce': if self.Log.noconflicts: # RESOLVED by choosing first rule if j > j1: self.ACTION[(i,a)] = (action,j1) else: self.ACTION[(i,a)] = (action,j) self.Log.add_conflict('rr',i,a,j1,j) else: raise LRConflictError(i,a) else: self.ACTION[(i,a)] = (action,j) def resolve_shift_reduce(self,i,a,s,r): """Operators precedence resolution or standard option: shift C{s}: rule for shift C{r}: rule for reduce """ try: if self.operators and self.operators.has_key(a) and self.precedence.has_key(r) and self.precedence[r]: prec_op, assoc_op = self.operators[a] if (self.precedence[r][0] > prec_op) or (self.precedence[r][0] == prec_op and self.precedence[r][1] =='left'): self.ACTION[(i,a)] = ('reduce',r) if _DEBUG: print "solved reduce %s" %r else: self.ACTION[(i,a)] = ('shift',s) if _DEBUG: print "solved shift %s" %s else: self.ACTION[(i,a)] = ('shift',s) if _DEBUG: print "solved shift %s" %s except (AttributeError, TypeError, KeyError,NameError): if self.Log.noconflicts: # choose to shift self.ACTION[(i,a)] = ('shift',s) if _DEBUG: print "choose shift %s for action (%s,%s)" %(s,i,a) self.Log.add_conflict('sr',i,a,s,r) if _DEBUG: print " %s for action (%s,%s)" %(self.Log.conflicts,i,a) else: raise LRConflictError(i,a) class SLRtable(LRtable): """Class for construction of a C{SLR} table C{SLR} items represented by a pair of integers C{(number of rule,position of dot)} (aho86:_compil page 221) """ def dotatend(self,item,i): n, k = item l = self.gr.rules[item[0]][0] for a in self.gr.follow[l]: self.add_action(i,a,'reduce',n) def closure(self,items): """The closure of a set of C{LR(0)} items C{I} is the set of items constructed from C{I} by the two rules: - every item of I is in closure(I) - If A -> s.Bt in closure(I) and B -> r, then add B ->.r to closure(I) (aho86:_compil page 223) """ added = {} for l in self.gr.nonterminals: added[l] = 0 close = items[:] e = 1 while e: e = 0 for i in close: s = self.NextToDot(i) if s in self.gr.nonterminals and added[s]==0 and self.gr.ntr.has_key(s): for n in self.gr.ntr[s]: close.append((n,0)) added[s] = 1 e = 1 return close def goto(self,items,s): """ goto(I,X) where I is a set of items and X a grammar symbol is the closure of the set of all items A -> sX.r such that A -> s.Xr is in I""" valid = osets.Set([]) for item in items: if self.NextToDot(item) == s: n, i = item valid.append((n, i + 1)) return self.closure(valid) def items(self): """ An LR(0) item of a grammar G is a production of G with a dot at some position on the right hand side. 
It is represented by the rule number and the position of the dot @return: a set of sets of items """ c = osets.Set([self.closure(osets.Set([(len(self.gr.rules) - 1,0)]))]) symbols = self.gr.terminals + self.gr.nonterminals e = 1 while e: e = 0 for i in c: for s in symbols: valid = self.goto(i,s) if valid != [] and valid not in c: c.append(valid) e = 1 return c def print_items(self,c): """Print SLR items """ s = "" j = 0 for i in c: s = s+ "I_%d: \n" %j for item in i: r, p = item lhs = self.gr.rules[r][0] rhs = self.gr.rules[r][1] s = s + "\t %s -> %s . %s \n" %(lhs, string.join(rhs[:p]," "), string.join(rhs[p:]," ")) j += 1 return s def NextToDot(self,item): """ returns symbol next to te dot or empty string""" n, i = item try: s = self.gr.rules[n][1][i] except IndexError: s = "" return s class LR1table(LRtable): """ Class for construction of a LR1 table Items are represented by a pair of integers (number of rule, position of dot) """ def closure(self,items): """The closure of a set of C{LR(1)} items C{I} is the set of items construted from I by the two rules: - every item of C{I} is in C{closure(I)} - If C{[A -> s.Bt,a]} in C{closure(I)},for C{B ->r} and each terminal C{b} in C{first(ta)}, add C{[B ->.r,b]} to C{closure(I)} """ close = items e = 1 while e: e = 0 for i in close: s = self.NextToDot(i) sa = self.gr.FIRST(self.AfterDot(i)) if s in self.gr.nonterminals and self.gr.ntr.has_key(s): for n in self.gr.ntr[s]: for b in sa: e = close.append((n,0,b)) return close def goto(self,items,s): """ goto(I,X) where I is a set of items and X a grammar symbol is the closure of the set of all items (A -> sX.r,a) such that (A -> s.Xr,a) in I""" valid = osets.Set([]) for item in items: if self.NextToDot(item) == s: n, i, t = item valid.append((n, i + 1,t)) return self.closure(valid) def items(self): """ An LR(1) item of a grammar G is a production of G with a dot at some position of the right hand side and a terminal: (rule_number,dot_position,terminal) (aho86:_compil page 231) """ c = osets.Set([ self.closure(osets.Set([(len(self.gr.rules) - 1,0,self.gr.endmark)]))]) symbols = self.gr.terminals + self.gr.nonterminals e = 1 while e: e = 0 for i in c: for s in symbols: valid=self.goto(i,s) if valid != [] : if c.s_append(valid): e = 1 return c def print_items(self,c): """Print C{LR(1)} items """ s = "" j = 0 for i in c: s = s+ "I_%d: \n" %j for item in i: r, p, t = item lhs = self.gr.rules[r][0] rhs = self.gr.rules[r][1] s = s + "\t %s -> %s . 
%s , %s\n" %(lhs, string.join(rhs[:p]," "), string.join(rhs[p:]," "),t) j += 1 print s return s def NextToDot(self,item): """ returns symbol next to the dot or empty string""" n, i, t = item try: s = self.gr.rules[n][1][i] except IndexError: s = "" return s def AfterDot(self,item): """ returns symbol next to the dot or empty string""" n, i, t = item try: s = self.gr.rules[n][1][i+1:] except IndexError: s = [] s.append(t) return s def dotatend(self,item,i): n, k, t = item self.add_action(i,t,'reduce',n) class LALRtable1(LRtable): """Class for construction of C{LALR(1)} tables""" def make_action_goto(self): """ Make C{action[i,X]} and C{goto[i,X]} all pairs C{(i,s)} not in action and goto dictionaries are 'error' """ self.gr.DERIVE_NT() c = self.items() if _DEBUG: print self.print_items(c) self.ACTION = {} self.GOTO = {} #shelve not working with osets #self.Log.items = c for i in range(len(c)): for item in c[i].keys(): a = self.NextToDot(item) if a in self.gr.terminals: state =self.goto(c[i],a) j = self.get_union(c,state) if j != -1: self.add_action(i,a,'shift',j) elif a == "": """ Dot at right end """ l = self.gr.rules[item[0]][0] if l != self.gr.aug_start : self.dotatend(item,c,i) else: """ last rule """ self.add_action(i,self.gr.endmark,'accept',[]) for s in self.gr.nonterminals: state = self.goto(c[i],s) j = self.get_union(c,state) if j != -1: self.GOTO[(i,s)] = j def items(self): """ An C{LALR(1)} item of a grammar C{G} is a production of C{G}with a dot at some position of the right hand side and a list of terminals: is coded as a dictonary with key C{(rule_number,dot_position)} and value a set of terminals """ i0 = {} i0[(len(self.gr.rules) - 1,0)] = osets.Set([self.gr.endmark]) c = osets.Set([self.closure(i0)]) symbols = self.gr.terminals + self.gr.nonterminals e = 1 while e: e = 0 for i in c: for s in symbols: if self.core_merge(c,self.goto(i,s)) == 1: e = 1 return c def print_items(self,c): """Print C{LALR(1)} items """ s = "" j = 0 for i in range(len(c)): s = s+ "I_%d: \n" %i for item in c[i].keys(): r, p = item lhs = self.gr.rules[r][0] rhs = self.gr.rules[r][1] s = s + "\t %s -> %s . 
%s, %s \n" %(lhs, string.join(rhs[:p]," "), string.join(rhs[p:]," "),c[i][item]) print s return s def goto(self,items,s): """ C{goto(I,X)} where C{I} is a set of items and C{X} a grammar symbol is the closure of the set of all items C{(A -> sX.r,a)} such that C{(A -> s.Xr,a)} in C{I}""" valid = {} for (n,i) in items.keys(): if self.NextToDot((n,i)) == s: if not valid.has_key((n,i+1)): valid[(n,i + 1)] = osets.Set([]) for t in items[(n,i)]: valid[(n, i + 1)].append(t) return self.closure(valid) def closure(self,items): """The closure of a set of C{LR(1)} items I is the set of items construted from I by the two rules: - every item of I is in closure(I) - If [A -> s.Bt,a] in closure(I),for B ->r and each terminal b in first(ta), add [B ->.r,b] to closure(I) """ e = 1 while e: e = 0 for i in items.keys(): s = self.NextToDot(i) if s in self.gr.nonterminals and self.gr.ntr.has_key(s): l = self.AfterDot(i,items) for n in self.gr.ntr[s]: if not items.has_key((n,0)): items[(n,0)] = osets.Set([]) if items[(n,0)].s_extend(l) == 1 : e = 1 return items def get_union(self,c,j): """ """ for i in c: if i.keys() == j.keys(): return c.index(i) return -1 def core_merge(self,c,j): """ """ if j == {} or j in c : return 0 e = 2 for i in c: if i.keys() == j.keys(): e = 0 for k in j.keys(): if i[k].s_extend(j[k]) == 1: e = 1 break if e == 2: e = c.s_append(j) return e def NextToDot(self,item): """ returns symbol next to the dot or empty string""" n, i = item try: s = self.gr.rules[n][1][i] except IndexError: s = "" return s def AfterDot(self,item,items): """ returns FIRST of strings after the dot concatenated with lookahead""" n, i = item try: s = self.gr.rules[n][1][i+1:] except IndexError: s = [] sa = osets.Set([]) for a in items[item]: s.append(a) sa.s_extend(self.gr.FIRST(s)) del s[len(s)-1] return sa def dotatend(self,item,c,i): n, k = item for a in c[i][item]: self.add_action(i,a,'reduce',n) class LALRtable(LALRtable1): """Class for construction of LALR tables """ def make_action_goto(self): """ collection of LR(0) items """ self.gr.DERIVE_T() self.gr.TransClose() c = self.items() if _DEBUG: print self.print_items(c) """ make action[i,X] and goto[i,X] all pairs (i,s) not in action and goto dictionaries are 'error' """ self.ACTION = {} self.GOTO = {} #shelve not working with osets #self.Log.items = c for i in range(len(c)): for item in c[i].keys(): C = self.NextToDot(item) if C in self.gr.nonterminals: if self.gr.derive_ter.has_key(C): for a in self.gr.derive_ter[C]: if self.goto_ref.has_key((i,a)): j = self.goto_ref[(i,a)] self.add_action(i,a,'shift',j) if self.gr.close_nt.has_key(C): for A in self.gr.close_nt[C].keys(): """Error: ignores end string s in C->*As""" for p in self.gr.close_nt[C][A]: r = self.AfterDotTer(item,c[i],p) if self.gr.ntr.has_key(A): for k in self.gr.ntr[A]: if self.gr.rules[k][1] == []: for a in r: self.add_action(i,a,'reduce',k) elif C in self.gr.terminals: if self.goto_ref.has_key((i,C)): j = self.goto_ref[(i,C)] self.add_action(i,C,'shift',j) else: """ Dot at right end """ l = self.gr.rules[item[0]][0] if l != self.gr.aug_start: self.dotatend(item,c,i) else: """ last rule """ self.add_action(i,self.gr.endmark,'accept',[]) for s in self.gr.nonterminals: state = self.goto(c[i],s) j = self.get_union(c,state) if j != -1: self.GOTO[(i,s)] = j def items(self): """ An C{LALR(1)} kernel item of a grammar C{G} is a production of C{G} with a dot at some position of the right hand side (except the first) and a list of terminals: is coded as a dictionary with key C{(rule_number,dot_position)} 
and value a set of terminals. """ i0 = {} i0[(len(self.gr.rules) - 1,0)] = osets.Set([self.gr.endmark]) c= osets.Set([i0]) symbols = self.gr.terminals + self.gr.nonterminals """ kernel LR(0) items """ self.goto_ref = {} e = 1 while e: e = 0 for i in c: for s in symbols: valid = self.goto(i,s) if valid != {}: if c.s_append(valid): e = 1 self.goto_ref[(c.index(i),s)] = c.index(valid) """ Discovering propagated and spontaneous lookaheads for kernel items k and grammar symbol s""" lh={} for k in c: nk = c.index(k) lh[nk] = {} #osets.Set([]) for (n,i) in k.keys(): lh[nk][(n,i)] = osets.Set([]) j = {} j[(n,i)]=osets.Set([(self.gr.dummy)]) j = self.closure(j) for s in symbols: for (m1,j1) in j.keys(): if self.NextToDot((m1,j1)) == s: for a in j[(m1,j1)]: if a == self.gr.dummy: lh[nk][(n,i)].append((self.goto_ref[(nk,s)],m1,j1+1)) else: c[self.goto_ref[(nk,s)]][(m1,j1+1)].append(a) del j """ Propagate lookaheads """ # c[0][(len(self.gr.rules) - 1,0)].s_append(self.gr.endmark) e = 1 while e: e = 0 for k in c: nk = c.index(k) for (n,i) in k.keys(): for (m,n1,i1) in lh[nk][(n,i)]: if c[m][(n1,i1)].s_extend(k[(n,i)]) == 1: e = 1 return c def goto(self,items,s): """ C{goto(I,X)} where I is a set of kernel items and X a grammar symbol is the closure of the set of all items (A -> sX.r,a) such that (A -> s.Xr,a) is in I""" valid = {} for (n,i) in items.keys(): x = self.NextToDot((n,i)) if x == s: if not valid.has_key((n,i+1)): valid[(n,i + 1)] = osets.Set([]) if self.gr.close_nt.has_key(x): for a in self.gr.close_nt[x].keys(): if self.gr.ntr.has_key(a): for k in self.gr.ntr[a]: if self.gr.rules[k][1] != [] and self.gr.rules[k][1][0] == s: valid[(k,1)] = osets.Set([]) return valid def NextToDot(self,item): """ returns symbol next to the dot or empty string""" n, i = item try: s = self.gr.rules[n][1][i] except IndexError: s = "" return s def AfterDotTer(self,item,items,path): """ returns FIRST of strings after the dot concatenated with lookahead""" if path: p, n = path if not n: return p l, i = item try: f= self.gr.FIRST(self.gr.rules[l][1][i+1:]) ns = self.gr.nullable[string.join(self.gr.rules[l][1][i+1:])] except IndexError: f = [] ns = 1 if ns: return items[item] else: return f class LogLR: """Class for LR table construction report: @ivar expect: number of shit/reduce conflicts expected @type expect: integer @ivar items: set of LR items @ivar conflicts: dictionary of conflicts occurred in LR table construction: 'rr' and 'sr' """ def __init__(self,noconflicts,expect): self.noconflicts = noconflicts self.expect = expect self.conflicts = {} self.items = None def add_conflict(self,type,i,a,value1,value2): try: self.conflicts[type].append((i,a,value1,value2)) except KeyError: self.conflicts[type] = [(i,a,value1,value2)] class LRparser: """Class for LR parser @ivar cfgr: context free grammar @ivar rules: grammar rules @ivar terminals: grammar terminals @ivar nonterminals: grammar nonterminals @ivar table: LR parsing table @ivar ACTION: Action function @ivar GOTO: Goto function @ivar tokens: tokens to be parsed @ivar context: computational context @ivar output: list of grammar rules used for parsing C{tokens} (right derivation in reverse) @ivar stack: LR stack with pairs C{(state,token)} """ def __init__(self,grammar,table_shelve,no_table=1,tabletype=LALRtable,operators=None,noconflicts=1,expect=0,**args): """ @param grammar: is a list for productions; each production is a tuple C{(LeftHandside,RightHandside,SemFunc,Prec?)} with C{LeftHandside} nonterminal, C{RightHandside} list of symbols, C{SemFunc} 
syntax-direct semantics, if present C{Prec (PRECEDENCE,ASSOC)} for ambiguous rules First production is for start symbol @param table_shelve: file where parser is saved @type table_shelve: string @param tabletype: type of LR table: C{SLR}, C{LR1}, C{LALR} @type tabletype: LRtable class @param no_table: if 0 table_shelve is created anyway @type no_table: integer @param operators: precedence and associativity for operators @type operators: dictionary @param noconflicts: if 0 LRtable conflicts are not resolved, unless spcecial operator rules @type noconflicts: integer @param expect: exact number of expected LR shift/reduce conflicts @type expect: integer @param args: extra arguments; key C{nosemrules} if 1 no semantic rules are applied @type args: dictionary """ self.cfgr = CFGrammar(grammar) self.rules = self.cfgr.rules self.terminals = self.cfgr.terminals self.nonterminals = self.cfgr.nonterminals self.endmark = self.cfgr.endmark if args.has_key('nosemrules'): self.nosemrules=args['nosemrules'] else: self.nosemrules = 0 db=whichdb.whichdb(table_shelve) if not(db==None or db=="" or no_table==0): try: d = shelve.open(table_shelve,'w') self.ACTION = d['action'] self.GOTO = d['goto'] if d.has_key('version'): if d['version'] < _Version: raise TableError(table_shelve) try: self.Log = d['log'] except KeyError: raise TableError(table_shelve) d.close() except Exception: if os.access(table_shelve,os.W_OK): os.remove(table_shelve) else: raise TableError(table_shelve) else: d = shelve.open(table_shelve,'n') self.table = tabletype(self.cfgr,operators,noconflicts,expect) d['version'] = _Version d['action'] = self.ACTION = self.table.ACTION d['goto'] = self.GOTO = self.table.GOTO d['log'] = self.Log = self.table.Log d.close() def __str__(self): """@return: the LR parsing table showing for each state the action and goto function """ l = (map(lambda x: x[0],self.ACTION.keys())) l.sort() a1="\nState\n" if len(self.terminals) < 20: for a in self.terminals: a1=a1+" \t%s" %a for i in osets.Set(l): a3="\n%s" % i for a in self.terminals: if self.ACTION.has_key((i,a)): if self.ACTION[i,a][0]=="shift": x="s" else: x="r" a2="\t%s%s" %(x,self.ACTION[i,a][1]) else: a2="\t" a3=a3+a2 a1="%s%s" %(a1,a3) ac=a1 else: for i in osets.Set(l): a3="%s\n" % i for a in self.terminals: if self.ACTION.has_key((i,a)): if self.ACTION[i,a][0]=="shift": x="s" else: x="r" a3= a3+"%s = %s%s\n" %(a,x,self.ACTION[i,a][1]) a1="%s%s" %(a1,a3) ac=a1 l = (map(lambda x: x[0],self.GOTO.keys())) l.sort() a1 = "\nState\n" if len(self.nonterminals) < 20: for a in self.nonterminals: a1 = a1 + " \t%s" %a for i in osets.Set(l): a3 = "\n%s" % i for a in self.nonterminals: if self.GOTO.has_key((i,a)): a2 = "\t%s" %self.GOTO[(i,a)] else: a2 = "\t" a3 = a3 + a2 a1 = "%s%s" %(a1,a3) else: for i in osets.Set(l): a3 = "%s\n" % i for a in self.nonterminals: if self.GOTO.has_key((i,a)): a3 = a3 + "%s = %s\n" %(a,self.GOTO[(i,a)]) a1 = "%s%s" %(a1,a3) go = a1 return "Action table:\n %s\n Goto table:%s\n" % (ac,go) def parsing(self,tokens,context = None): """LR Parsing Algorithm (aho86:_compil, page 218) @param tokens: pairs (TOKEN, SPECIAL_VALUE) @param context: a computational context for semantic actions @return: parsed result """ self.stack = Stack() self.stack.push((0,[])) self.tokens = tokens self.tokens.append((self.endmark,self.endmark)) self.context = context self.output = [] self.ip = 0 while 1: s = self.stack.top()[0] a = self.tokens[self.ip][0] if _DEBUG: print "Input: %s\nState: %s" %(map(lambda x:x[0],self.tokens[self.ip:]),s) print "Stack: %s" 
%self.stack try: if self.ACTION[s,a][0] == 'shift': if _DEBUG: print "Action: shift\n" self.stack.push((self.ACTION[s,a][1], self.tokens[self.ip][1])) self.ip = self.ip + 1 elif self.ACTION[s,a][0] == 'reduce': n = self.ACTION[s,a][1] if _DEBUG: print "Action: reduce %s %s\n" %(n,str(self.rules[n])) semargs = [self.stack.pop()[1] for i in range(len(self.rules[n][1]))] semargs.reverse() if self.nosemrules: reduce = [] else: reduce = Reduction(self.rules[n][2],semargs,self.context) del semargs s1 = self.stack.top()[0] a = self.rules[n][0] self.stack.push((self.GOTO[s1,a],reduce)) self.output.append(n) elif self.ACTION[s,a] == ('accept', []): break else: raise LRParserError(s,a) except KeyError: if _DEBUG: print "Error in action: %s" %self.ACTION raise LRParserError(s,a) except SemanticError, m: if _DEBUG: print "Semantic Rule %d %s" %(n,self.rules[n][2]) raise SemanticError(m,n,self.rules[n][2]) return self.stack.top()[1] def parse_grammar(self,st,context,args): """ Transforms a string into a grammar description @param st: is a string representing the grammar rules, with default symbols as below. Fisrt rule for start. I{Example}:: reg -> reg + reg E{lb}E{lb} self.OrSemRule E{rb}E{rb} // priority 'left'| ( reg ) E{lb}E{lb}self.ParSemRuleE{rb}E{rb} ; where: - rulesym="->" production symbol - rhssep='' RHS symbols separator - opsym='//' operator definition separator - semsym=E{lb}E{lb} semantic rule start marker - csemsym=E{rb}E{rb} semantic rule end marker - rulesep='|' separator for multiple rules for a LHS - ruleend=';' end marker for one LHS rule""" self.pg=Yappy_grammar(**args) self.pg.input(st,context) return self.pg.context['rules'] def gsrules(self,rulestr, **sym): """ Transforms a string in a grammar description @param rulestr: is a string representing the grammar rules, with default symbols as below. @param sym: Dictionary with symbols used. 
Default ones: - rulesym="->" production symbol - rhssep='' RHS symbols separator - opsym='//' operator definition separator - semsym=E{lb}E{lb} semantic rule start marker - csemsym=E{rb}E{rb} semantic rule end marker - rulesep='|' separator for multiple rules for a LHS - ruleend=';' end marker for one LHS rule Example: reg -> reg + reg E{lb}E{lb} self.OrSemRule // (priority,'left') E{rb}E{rb} | ( reg ) E{lb}E{lb}self.ParSemRuleE{rb}E{rb} ; """ if not sym: sym = Dict(rulesym="->", rhssep='', opsym='//', semsym='{{', csemsym='}}', rulesep='|', ruleend=';') gr = [] rl = string.split(rulestr,sym['ruleend']) for l in rl: m = re.compile(sym['rulesym']).search(l) if not m: continue else: if m.start() == 0: raise GrammarError(l) else: lhs = l[0:m.start()].strip() if m.end() == len(l): raise GrammarError(l) else: rhss = string.strip(l[m.end():]) if rhss == "[]": rhs = [] sem = EmptySemRule op = None else: rhss = string.split(l[m.end():],sym['rulesep']) for rest in rhss: rest=string.strip(rest) if rhss == "[]": rhs = [] sem = EmptySemRule op = None else: m=re.search(sym['semsym']+'(?P.*)'+sym['csemsym'],rest) if not m: rhs = string.split(rest,None) sem = DefaultSemRule op = None else: if m.start() == 0: raise GrammarError(rest) else: rhs = string.split(rest[0:m.start()].strip()) if m.group('opsem'): opsem = string.split(m.group('opsem'),sym['opsym']) if len(opsem) == 1: sem = string.strip(opsem[0]) op = None elif len(opsem) == 2: sem = string.strip(opsem[0]) op = string.strip(opsem[1]) else: raise GrammarError(rest) else: sem = DefaultSemRule op = None if op == None: gr.append((lhs,rhs,eval(sem))) else: gr.append((lhs,rhs,eval(sem),eval(op))) return gr class LRBuildparser: """Class for LR parser: without shelve and semantic rules(obsolete) """ def __init__(self,grammar): """ """ self.table = LALRtable(grammar) def parsing(self,tokens): """LR Parsing Algorithm """ self.stack = Stack() self.stack.push(0) self.input = tokens self.input.append(self.table.gr.endmark) self.output = [] self.ip = 0 while 1: s = self.stack.top() a = self.input[self.ip] if not self.table.ACTION.has_key((s,a)): raise LRParserError(s,a) elif self.table.ACTION[s,a][0] == 'shift': # self.stack.push(a) self.stack.push(self.table.ACTION[s,a][1]) self.ip = self.ip + 1 elif self.table.ACTION[s,a][0] == 'reduce': n = self.table.ACTION[s,a][1] for i in range(len(self.table.gr.rules[n][1])): self.stack.pop() s1 = self.stack.top() a = self.table.gr.rules[n][0] # self.stack.push(a) if not self.table.GOTO.has_key((s1,a)): raise LRParserError(s1,a) else: self.stack.push(self.table.GOTO[s1,a]) self.output.append(n) elif self.table.ACTION[s,a] == ('accept', []): break else: raise LRParserError() ############# Auxiliares ################## def Dict(**entries): """Create a dict out of the argument=value arguments""" return entries def grules(rules_list,rulesym="->",rhssep=None): """ Transforms a list of rules in a grammar description. If a rule has no semantic rules, C{DefaultSemRule} is assumed. @param rules_list: is a list of pairs (rule,sem) where rule is a string of the form: - Word rulesym Word1 ... 
Word2 - Word rulesym [] @param rulesym: LHS and RHS rule separator @param rhssep: RHS values separator (None for white chars) @return: a grammar description """ gr = [] sep = re.compile(rulesym) for r in rules_list: if type(r) is StringType: rule = r else: rule = r[0] m = sep.search(rule) if not m: continue else: if m.start() == 0: raise GrammarError(rule) else: lhs = rule[0:m.start()].strip() if m.end() == len(rule): raise GrammarError(rule) else: rest=string.strip(rule[m.end():]) if rest == "[]": rhs = [] else: rhs = string.split(rest,rhssep) if type(r) is StringType: gr.append((lhs,rhs,DefaultSemRule)) elif len(r)==3: gr.append((lhs,rhs,r[1],r[2])) elif len(r)==2: gr.append((lhs,rhs,r[1])) else: raise GrammarError(r) return gr ####################################################### class Yappy(LRparser): """ A basic class for parsing. @ivar lex: a Lexer object """ def __init__(self,tokenize,grammar, table='YappyTab',no_table=1, tabletype=LALRtable,noconflicts=1,expect=0,**args): """@param tokenize: same as for L{Lexer} @param grammar: if a string C{parse_grammar} is called @param table: and no_table, tabletype same as for L{LRparser} @param args: dictionary where: - key C{tmpdir} is the directory where the parse table used by the Yappy Grammar is stored; - key C{usrdir} is the directory where the user tables are stored - key C{nosemrules} if 1 semantic actions are not applied""" self.lex = Lexer(tokenize) operators = None if self.lex.__dict__.has_key("operators"): operators = self.lex.operators if type(grammar) is StringType: grammar = self.parse_grammar(grammar,{'locals':locals()},args) if args.has_key('usrdir') and os.path.isdir(args['usrdir']): table = string.rstrip(args['usrdir']) + '/' + table if os.path.dirname(table)=="" or os.path.exists(os.path.dirname(table)): LRparser.__init__(self,grammar,table,no_table,tabletype,operators,noconflicts,expect,**args) else: sys.stderr.write("Directory %s do not exist\n" %table) sys.exit() if (self.Log.noconflicts and ((self.Log.conflicts.has_key('sr') and len(self.Log.conflicts['sr'])!= self.Log.expect) or self.Log.conflicts.has_key('rr'))): print "LR conflicts: number %s value %s" %(len(self.Log.conflicts['sr']),self.Log.conflicts) print """If it is Ok, set expect to the number of conflicts and build table again""" def input(self,str=None,context={},lexer=0): """ Reads from stdin or string and retuns parsed result @param str: String to be parsed. If not given, reads from C{stdin}. 
#######################################################

class Yappy(LRparser):
    """A basic class for parsing.

    @ivar lex: a Lexer object
    """
    def __init__(self, tokenize, grammar, table='YappyTab', no_table=1,
                 tabletype=LALRtable, noconflicts=1, expect=0, **args):
        """@param tokenize: same as for L{Lexer}
        @param grammar: if a string, C{parse_grammar} is called
        @param table: and no_table, tabletype same as for L{LRparser}
        @param args: dictionary where:
          - key C{tmpdir} is the directory where the parse table used by
            the Yappy grammar is stored;
          - key C{usrdir} is the directory where the user tables are stored;
          - key C{nosemrules}: if 1, semantic actions are not applied
        """
        self.lex = Lexer(tokenize)
        operators = None
        if self.lex.__dict__.has_key("operators"):
            operators = self.lex.operators
        if type(grammar) is StringType:
            grammar = self.parse_grammar(grammar, {'locals': locals()}, args)
        if args.has_key('usrdir') and os.path.isdir(args['usrdir']):
            table = string.rstrip(args['usrdir']) + '/' + table
        if os.path.dirname(table) == "" or os.path.exists(os.path.dirname(table)):
            LRparser.__init__(self, grammar, table, no_table, tabletype,
                              operators, noconflicts, expect, **args)
        else:
            sys.stderr.write("Directory %s does not exist\n" % table)
            sys.exit()
        if (self.Log.noconflicts and
            ((self.Log.conflicts.has_key('sr') and
              len(self.Log.conflicts['sr']) != self.Log.expect) or
             self.Log.conflicts.has_key('rr'))):
            print "LR conflicts: number %s value %s" % (len(self.Log.conflicts['sr']),
                                                        self.Log.conflicts)
            print """If that is OK, set expect to the number of conflicts and build the table again"""

    def input(self, str=None, context={}, lexer=0):
        """Reads from stdin or from a string and returns the parsed result.

        @param str: string to be parsed. If not given, reads from C{stdin}.
        @param context: some initial computational context
        @param lexer: if 1, only lexical analysis is performed
        @return: a tuple C{(parsed result, context)} or only the C{parsed result}
        """
        if str:
            self.tokens = self.lex.scan(str)
        else:
            print "Input: ",
            self.tokens = self.lex.readscan()
        if lexer:
            return self.tokens
        self.context = context
        return self.parsing(self.tokens, self.context)

    def inputfile(self, FileName, context={}):
        """Reads input from a file"""
        try:
            file = open(FileName, "r")
        except IOError:
            raise YappyError()
        return self.input(file.read(), context)

    def parse_tree(self):
        """To be defined using output"""
        pass

    def test(self):
        """A test for each class"""
        pass

######### Semantic Grammar Rules ##############

def expandSemRule(strargs, strfun):
    """Expands each positional marker C{$n} in C{strfun} into an indexing
    expression built from C{strargs}, e.g. '$0' becomes 'sargs[0]'."""
    regargs = re.compile(r'\$(\d+)')
    matchargs = regargs.finditer(strfun)
    for i in [(x.group(0), strargs + x.group(1) + "]") for x in matchargs]:
        strfun = string.replace(strfun, i[0], i[1])
    return strfun

def Reduction(fun, sargs, context={}):
    """Reduction function for semantic rules:
      - C{fun} can be:
        -- a function
        -- or a string with positional arguments C{$n} that is expanded
           and evaluated with C{eval}
    """
    if callable(fun):
        return apply(fun, [sargs, context])
    elif type(fun) is StringType:
        a = expandSemRule("sargs[", fun)
        l = context.get('locals', {})
        l.update(locals())
        return eval(a, context.get('globals', {}), l)
    else:
        raise SemanticError, 'Wrong type: %s' % fun

def DefaultSemRule(sargs, context={}):
    """Default semantic rule"""
    return sargs[0]

def EmptySemRule(sargs, context={}):
    return []
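# How the C{$n} notation is evaluated, sketched with made-up values:
#
#   expandSemRule("sargs[", "$0 + $2")       # -> 'sargs[0] + sargs[2]'
#   Reduction("$0 + $2", [1, '+', 2])        # -> 3
#   Reduction(lambda l, c: l[0], [7], {})    # callables are applied directly -> 7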
###### Parser for grammars ##################

class Yappy_grammar(Yappy):
    """A parser for grammar rules. See C{test()} for an example."""
    def __init__(self, no_table=1, table='yappypar.tab', tabletype=LR1table, **args):
        grammar = grules([
            ("G -> RULE G", self.GRule),
            ("G -> []", EmptySemRule),
            ("RULE -> ID rulesym MULTI ruleend", self.RULERule),
            ("MULTI -> RHS rulesep MULTI", self.MULTIRule),
            ("MULTI -> RHS", self.MULTIRule),
            ("RHS -> []", EmptySemRule),  # RHS -> OPSEM not allowed; epsilon-rule
            ("RHS -> RH OPSEM", self.RHSRule),
            ("RH -> ID RH", self.RHRule),
            ("RH -> ID", self.RHRule),
            ("OPSEM -> []", self.OPSEMRule),
            # ("OPSEM -> semsym ID csemsym", self.OPSEMRule),  # OPSEM -> OP not allowed
            # ("OPSEM -> semsym ID OP csemsym", self.OPSEMRule),
            ("OPSEM -> IDS", self.OPSEMRule1),
            ("OPSEM -> IDS OP", self.OPSEMRule1),
            ("OP -> opsym OPV", self.OPRule),
            ("OPV -> ID ID ", self.OPVRule)])
        tokenize = [
            ("\{\{.*\}\}", lambda x: ("IDS", string.strip(x[2:-2]))),
            ("\s+", ""),
            ("->", lambda x: ("rulesym", x)),
            ("\|", lambda x: ("rulesep", x)),
            (";", lambda x: ("ruleend", x)),
            # ("}}", lambda x: ("csemsym", x)),
            # ("{{", lambda x: ("semsym", x)),
            ("//", lambda x: ("opsym", x)),
            (".*", lambda x: ("ID", x))]
        if args.has_key('tmpdir'):
            args1 = {'usrdir': string.rstrip(args['tmpdir'], '/')}
        else:
            args1 = {}
        Yappy.__init__(self, tokenize, grammar, table, no_table, **args1)

    def OPVRule(self, arg, context):
        try:
            int(arg[0])
        except ValueError:
            raise SemanticError("Precedence must be an integer: %s given" % arg[0])
        if arg[1] != 'left' and arg[1] != 'right' and arg[1] != 'noassoc':
            raise SemanticError("Associativity must be 'left' or 'right' or 'noassoc': %s given" % arg[1])
        return (int(arg[0]), arg[1])

    def OPRule(self, arg, context):
        return arg[1]

    def OPSEMRule(self, arg, context):
        if len(arg) == 4:
            return (arg[1], arg[2])
        if len(arg) == 3:
            return arg[1]
        if len(arg) == 0:
            return 'DefaultSemRule'

    def OPSEMRule1(self, arg, context):
        if len(arg) == 2:
            return (arg[0], arg[1])
        if len(arg) == 1:
            return arg[0]
        if len(arg) == 0:
            return 'DefaultSemRule'

    def RHRule(self, arg, context):
        if len(arg) == 1:
            return [arg[0]]
        if len(arg) == 2:
            return [arg[0]] + arg[1]

    def RHSRule(self, arg, context):
        return (arg[0], arg[1])

    def MULTIRule(self, arg, context):
        if len(arg) == 1:
            return [arg[0]]
        else:
            return [arg[0]] + arg[2]

    def RULERule(self, arg, context):
        lhs = arg[0]
        def grule(self, l):
            if l == []:
                return (lhs, [], EmptySemRule)
            if type(l[1]) is TupleType:
                return (lhs, l[0], eval(l[1][0], globals(), context['locals']), l[1][1])
            else:
                return (lhs, l[0], eval(l[1], globals(), context['locals']))
        return map(lambda l: grule(self, l), arg[2])

    def GRule(self, args, context):
        if context.has_key('rules'):
            context['rules'] = args[0] + context['rules']
        else:
            context['rules'] = args[0]
        return []

    def test(self):
        st = """
        reg -> reg + reg {{DefaultSemRule}} // 200 left |
               reg reg {{DefaultSemRule}} // 200 left |
               reg * {{DefaultSemRule}} |
               ( reg ) {{DefaultSemRule}} |
               id {{lambda l,c:l[0]}};
        reg -> ;
        a -> reg | reg ;
        """
        st1 = """
        reg -> reg + reg {{DefaultSemRule // 200 left}} |
               reg reg {{DefaultSemRule // 200 left}} |
               reg * {{DefaultSemRule}} |
               ( reg ) {{DefaultSemRule}} |
               id {{DefaultSemRule}};
        reg -> ;
        a -> reg | reg ;
        """
        self.input(st, {'locals': locals()})
        return self.context['rules']

class Stack:
    """A simple class to implement stacks"""
    def __init__(self, start=[]):
        """Reverse initial stack objects"""
        self.stack = []
        for x in start:
            self.push(x)
        self.stack.reverse()

    def push(self, object):
        self.stack = [object] + self.stack

    def pop(self):
        if not self.stack:
            raise StackUnderflow()
        top, self.stack = self.stack[0], self.stack[1:]
        return top

    def top(self):
        """Returns the top of the stack (without popping it)"""
        if not self.stack:
            raise StackUnderflow()
        return self.stack[0]

    def empty(self):
        """Tests if the stack is empty"""
        return not self.stack

    def popall(self):
        """Empties the stack"""
        self.stack = []

    def __repr__(self):
        return '[Stack:%s]' % self.stack

    def __cmp__(self, other):
        return cmp(self.stack, other.stack)

    def __len__(self):
        return len(self.stack)

    def __add__(self, other):
        return Stack(self.stack + other.stack)

    def __mul__(self, reps):
        return Stack(self.stack * reps)

    def __getitem__(self, offset):
        return self.stack[offset]

    def __getslice__(self, low, high):
        return Stack(self.stack[low:high])

    def __getattr__(self, name):
        return getattr(self.stack, name)
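# A quick sketch of Stack behaviour (values made up); the newest element
# always sits at index 0:
#
#   s = Stack([1, 2, 3])   # initial objects are reversed, so s.top() == 1
#   s.push(0)
#   s.top()    # -> 0 (does not pop)
#   s.pop()    # -> 0
#   len(s)     # -> 3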
yappy-1.9.4/yappy/parser.pyc0000640000076600000240000021657011404125030014340 0ustar  dialout
yappy-1.9.4/yappy/parser.py~0000640000076600000240000021517711404125030014375 0ustar  dialout# -*- coding: utf-8 -*- """ This is part of Yappy parser.py -- Yet another parser for python... An LR parser generator, based on Aho et al. 1986, C{Compilers} (aho86:_compil). It currently builds C{SLR}, C{LR(1)} and C{LALR(1)} parsing tables. Copyright (C) 2000-2003 Rogério Reis & Nelma Moreira {rvr,nam}@ncc.up.pt Version: $Id: parser.py,v 1.18 2006-07-19 09:52:06 rvr Exp $ This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. @author: Rogério Reis & Nelma Moreira {rvr,nam}@ncc.up.pt @var _DEBUG: if nonzero, display information during parser generation or parsing. @type _DEBUG: integer """ from types import * import re, exceptions, string import sys, string, copy, time, operator import os.path import shelve # set elements are mutable objects; we cannot use sets import osets #Globals _DEBUG=0 _Version = 1.7 NIL = "" class Lexer: """Class for lexical analyser to use with the parser @ivar rules: lexical rules @ivar operators: precedence and associativity for operators @type operators: dictionary """ def __init__(self,rules_list): """ For now the lexer is kept as simple as possible, so order is really essential: i.e. if a keyword is a substring of another, its rule must appear after the longer keyword, for the obvious reasons... @param rules_list: contains pairs C{(re,funct,op?)} where: C{re}: is an uncompiled python regular expression C{funct}: the name of a function that returns the pair C{(TOKEN, SPECIAL_VALUE)}, where C{TOKEN} is the token to be used by the parser and C{SPECIAL_VALUE} an eventual associated value. The argument is the matched string. If C{funct} equals C{""} the token is ignored. This can be used for delimiters. C{op}: if present, is a tuple with operator information: C{(TOKEN,PRECEDENCE,ASSOC)} where C{PRECEDENCE} is an integer and C{ASSOC} the string 'left' or 'right'.
""" self.rules = [] rnumber = 1 for r in rules_list: try: rex = r[0] funct = r[1] except IndexError: raise LexicalError(rnumber,r) try: rec = re.compile(rex) except TypeError: raise LexicalRulesErrorRE(rex,rnumber) try: op,prec,assoc = r[2] if not self.__dict__.has_key("operators"): self.operators = {} if not self.operators.has_key(op): self.operators[op] = (prec,assoc) except IndexError: pass self.rules.append((rec,funct)) rnumber = rnumber + 1 if _DEBUG and self.__dict__.has_key("operators"): print "operators %s" %self.operators def scan(self,string): """Performs the lexical analysis on C{string} @return: a list of tokens (pairs C{(TOKEN , SPEcial_VALUE )}), for recognized elements and C{("@UNK", string )} for the others""" st = [string] for r in self.rules: st = self.scanOneRule(r,st) return self.scanUnknown(st) def scanOneRule(self,rule,st): """Scans space C{st} according only one rule @param rule: one rule C{(re,fun,op)} @param st: is a list of strings and already matched structures """ re = rule[0] fun = rule[1] st1 = [] for s in st: if not isinstance(s, StringType): st1.append(s) else: s1 = s while True: m = re.search(s1) if not m: st1.append(s1) break else: if m.start() != 0: st1.append(s1[0:m.start()]) # if fun == "": # st1.append(("",s1[m.start():m.end()])) # else: if fun != "": st1.append(apply(fun,[s1[m.start():m.end()]])) if m.end() == len(s1): break else: s1 = s1[m.end():] return st1 def scanUnknown(self,st): """Scans the resulting structure making Unknown strings Unknown parts will be of the form ("@UNK", string ) """ st1 = [] for s in st: if isinstance(s, StringType): st1.append(("@UNK",s)) else: st1.append(s) return st1 def readscan(self): """Scans a string read from stdin """ st = raw_input() if not st: raise IOError if isinstance(st, StringType): s = self.scan(st) return s class YappyError(Exception): """Class for all Yappy exceptions""" pass class LexicalError(YappyError): """Class for all Yappy Lexical analyser exceptions""" def __init__(self,r,rule): self.message = 'Error in rule number %s: %s'%(r,rule) def __str__(self): return "%s" % (self.message) class LexicalRulesErrorRE(YappyError): """An error occured parsing the RE part of a lexical rule""" def __init__(self,re,no=0): self.message = 'Error in RE "%s" at rule n.%d'%(re,no) self.rule = no self.re = re def __str__(self): return "%s" % (self.message) class GrammarError(YappyError): """Class for input grammar errors """ def __init__(self,rule): self.message = 'Error in rule "%s" '%rule class SLRConflictError(YappyError): """Confliting actions in building SLR parsing table. Grammar is not SLR(0)""" def __init__(self,i,a): self.message = 'Confliting action[%d,%s] in SLR parsing table ' %(i,a) self.item = i self.symbol = a class LRConflictError(YappyError): """Conflicting actions in building LR parsing table. Grammar is not LR(1)""" def __init__(self,i,a): self.message = 'Confliting action[%d,%s] in LR(1) parsing table ' %(i,a) self.item = i self.symbol = a def __str__(self): return "%s" % (self.message) class LRConflicts(YappyError): """Confliting actions in building LR parsing table. Grammar is not LR(1)""" def __init__(self): self.message = """Warning>>> Several confliting actions. 
Please consult self.Log for details""" def __str__(self): return "%s" % (self.message) class LRParserError(YappyError): """An error occured in LR parsing program""" def __init__(self,s,a): self.item = s self.symbol = a self.message = 'Error in LR: (%s,%s) not found' %(self.item,self.symbol) def __str__(self): return "%s" % (self.message) class SemanticError(YappyError): """An error occured in the application of a semantic action""" def __init__(self,m,n=0,r=None): self.message = m self.nrule = n self.rule = r def __str__(self): return "%s in semantic rule %d: %s" % (self.message,self.nrule,self.rule) class TableError(YappyError): """Mismatch table version """ def __init__(self,t): self.message = """A new table must be built. Please remove table shelve %s or set no_table to 0""" %t def __str__(self): return "%s" % (self.message) class CFGrammar: """ Class for context-free grammars @ivar rules: grammar rules @ivar terminals: terminals symbols @ivar nonterminals: nonterminals symbols @ivar start: start symbol @type start: string @ivar ntr: dictionary of rules for each nonterminal """ def __init__(self,grammar): """ @param grammar: is a list for productions; each production is a tuple C{(LeftHandside,RightHandside,SemFunc,Prec?)} with C{LeftHandside} nonterminal, C{RightHandside} list of symbols, C{SemFunc} syntax-direct semantics, if present C{Prec (PRECEDENCE,ASSOC)} for ambiguous rules First production is for start symbol Special symbols: C{@S}, C{$}, C{#} """ """ MUST BE IN THIS ORDER""" self.rules = grammar self.makenonterminals() self.maketerminals() self.start = self.rules[0][0] self.aug_start = "@S" self.rules.append((self.aug_start,[self.start],DefaultSemRule)) self.endmark = '$' self.dummy = '#' self.terminals.append(self.endmark) self.terminals.append(self.dummy) self.nonterminals.append(self.aug_start) """ ritems are only for control ... not needed """ self.ritems = [] """ ntr[A] is the set of rules which has A as left side""" self.ntr = {} i = 0 for r in self.rules: if not self.ntr.has_key(r[0]): self.ntr[r[0]] = [i] else: self.ntr[r[0]].append(i) for j in range(len(r[1]) + 1): self.ritems.append((i,j)) i = i + 1 def __str__(self): """Grammar rules @return: a string representing the grammar rules """ s = "" for n in range(len(self.rules)): lhs = self.rules[n][0] rhs = self.rules[n][1] s = s + "%s | %s -> %s \n" %(n, lhs, string.join(rhs," ")) return "Grammar Rules:\n\n%s" % s def makeFFN(self): self.NULLABLE() self.FIRST_ONE() self.FOLLOW() def maketerminals(self): """Extracts C{terminals} from the rules. 
C{nonterminals} must already exist""" self.terminals = [] for r in self.rules: for s in r[1]: if s not in self.nonterminals and s not in self.terminals: self.terminals.append(s) def makenonterminals(self): """Extracts C{nonterminals} from grammar rules.""" self.nonterminals = [] for r in self.rules: if r[0] not in self.nonterminals: self.nonterminals.append(r[0]) def NULLABLE(self): """Determines which nonterminals C{X ->* []} """ self.nullable = {} for s in self.terminals: self.nullable[s] = 0 for s in self.nonterminals: self.nullable[s] = 0 if self.ntr.has_key(s): for i in self.ntr[s]: if not self.rules[i][1]: self.nullable[s] = 1 break k = 1 while k == 1: k = 0 for r in self.rules: e = 0 for i in r[1]: if not self.nullable[i]: e = 1 break if e == 0 and not self.nullable[r[0]]: self.nullable[r[0]] = 1 k = 1 def FIRST(self,s): """C{FIRST(s)} is the set of terminals that begin the strings derived from s """ first = osets.Set([]) e = 0 for i in range(len(s)): first.s_extend(self.first[s[i]]) if not self.nullable[s[i]]: e = 1 break if e == 0: self.nullable[string.join(s)] = 1 else: self.nullable[string.join(s)] = 0 return first def FIRST_ONE(self): """Determines C{FIRST(s)}, for every symbol s, that is the set of terminals that begin the strings derived from s """ self.first = {} self.nd = {} self.ms =Stack() for s in self.terminals: self.first[s] = osets.Set([s]) for s in self.nonterminals: if self.ntr.has_key(s) and not self.first.has_key(s): # self.FIRST_NT(s) self.FIRST_TRA(s,1) def FIRST_TRA(self,s,d): """Transitiv closure of C{FIRST(X)} """ self.ms.push(s) self.nd[s] = d """ calculating F1(s)""" self.first[s] = osets.Set([]) for i in self.ntr[s]: for y in self.rules[i][1]: if self.nullable[y]: continue else: if y in self.terminals: self.first[s].append(y) break """transitive closure""" for i in self.ntr[s]: for y in self.rules[i][1]: if y in self.nonterminals: if not self.first.has_key(y): self.FIRST_TRA(y,d+1) if self.nd.has_key(y) and self.nd[y] != -1: self.nd[s] = min(self.nd[s],self.nd[y]) self.first[s].s_extend(self.first[y]) if self.nullable[y]: continue else: break else: break if self.nd[s] == d: while 1: y = self.ms.pop() if y == s: break self.first[y] = self.first[s].copy() self.nd[y] = -1 def FIRST_NT(self,s): """ Recursivelly computes C{FIRST(X)} for a nonterminal X""" if not self.ntr.has_key(s): return self.first[s] = osets.Set([]) for i in self.ntr[s]: r = self.rules[i][1] if r == []: self.nullable[s] = 1 else: e = 1 for y in r: if not self.first.has_key(y): self.FIRST_NT(y) self.first[s].s_extend(self.first[y]) if not self.nullable[y]: e = 0 break if e == 1: self.nullable[s] = 1 def FOLLOW(self): """computes C{FOLLOW(A)} for all nonterminals: the set of terminals a that can appear immediately to the right of A in some sentential form.""" self.follow = {} self.follow[self.start] = osets.Set([self.endmark]) for rule in self.rules: r = rule[1] for i in range(len(r)): if r[i] in self.nonterminals: if not self.follow.has_key(r[i]): self.follow[r[i]] = osets.Set([]) j = i + 1 self.follow[r[i]].s_extend(self.FIRST(r[j:])) e = 1 while e: e = 0 for s in self.nonterminals: for i in self.ntr[s]: r = self.rules[i][1] try: b = r[len(r)-1] if b in self.nonterminals and self.follow[b].s_extend(self.follow[s]): e = 1 except IndexError: pass except KeyError: pass for k in range(len(r)-1): j = k + 1 if r[k] in self.nonterminals and self.nullable[string.join(r[j:])]: if self.follow[r[k]].s_extend(self.follow[s]): e = 1 break def TransClose(self): """For each nonterminal C{s} determines the 
set of nonterminals a such that C{s ->* ar}, for some C{r}""" self.close_nt = {} self.nd = {} self.ms =Stack() for s in self.nonterminals: if self.ntr.has_key(s) and not self.close_nt.has_key(s): self.TRAVERSE(s,1) def TRAVERSE(self,s,d): """ """ self.ms.push(s) self.nd[s] = d """ calculating F1(s)""" self.close_nt[s] = {s:osets.Set([[]])} for i in self.ntr[s]: if not self.rules[i][1]: continue else: r = self.rules[i][1] for j in range(len(r)): if r[j+1:]: f = self.FIRST(r[j+1:]) ns = self.nullable[string.join(r[j+1:])] else: f = [] ns = 1 if r[j] in self.nonterminals: if not self.close_nt[s].has_key(r[j]): self.close_nt[s][r[j]] = osets.Set([[]]) if r[j+1:]: self.close_nt[s][r[j]].append((f,ns)) if not self.nullable[r[j]]: break else: break """reflexive tansitive closure""" for i in self.ntr[s]: if not self.rules[i][1]: continue else: r = self.rules[i][1] for j in range(len(r)): f = self.FIRST(r[j+1:]) ns = self.nullable[string.join(r[j+1:])] if r[j] in self.nonterminals: if not self.close_nt.has_key(r[j]): self.TRAVERSE(r[j],d+1) if self.nd.has_key(r[j]) and self.nd[r[j]] != -1: self.nd[s] = min(self.nd[s],self.nd[r[j]]) for k in self.close_nt[r[j]].keys(): if not self.close_nt[s].has_key(k): self.close_nt[s][k] = osets.Set([[]]) else: for v in self.close_nt[s][k]: if not v: self.close_nt[s][k].append((f,ns)) else: p, n = v if n: self.close_nt[s][k].append((p+f,ns)) else: self.close_nt[s][k].append((p,n)) if not self.nullable[r[j]]: break else: break if self.nd[s] == d: while 1: y = self.ms.pop() if y == s: break self.close_nt[y] = self.close_nt[s].copy() self.nd[y] = -1 def DERIVE_NT(self): """For each nonterminal C{s} determines the set of nonterminals a such that C{s ->* ar}, for some C{r}""" self.derive_nt = {} for s in self.nonterminals: if self.ntr.has_key(s) and not self.derive_nt.has_key(s): self.DERIVE_ONE_NT(s) def DERIVE_ONE_NT(self,s): """For nonterminal C{s} determines the set of nonterminals a such that C{s -> ar}, for some C{r} """ if not self.ntr.has_key(s): return self.derive_nt[s] = {s:osets.Set([None])} for i in self.ntr[s]: if not self.rules[i][1]: continue else: r = self.rules[i][1] for j in range(len(r)): if r[j] in self.nonterminals: if not self.derive_nt.has_key(r[j]): self.DERIVE_ONE_NT(r[j]) for k in self.derive_nt[r[j]].keys(): if not self.derive_nt[s].has_key(k): self.derive_nt[s][k] = osets.Set([]) for p in self.derive_nt[r[j]][k]: if not p : self.derive_nt[s][k].append(r[j+1:]) else: self.derive_nt[s][k].append(r[j+1:].append(p)) if not self.nullable[r[j]]: break else: break def DERIVE_T(self): """ """ self.derive_ter = {} for s in self.terminals: self.derive_ter[s] = osets.Set([s]) e = 1 while e: e = 0 for s in self.nonterminals: for i in self.ntr[s]: r = self.rules[i][1] if r == []: continue for i in range(len(r)): if r[i] in self.terminals: if i < len(r) -1: if self.derive_ter.has_key(r[i+1]): if not self.derive_ter.has_key(s): self.derive_ter[s] = osets.Set([]) if self.derive_ter[s].s_append(r[i]): e = 1 break else: if not self.derive_ter.has_key(s): self.derive_ter[s] = osets.Set([]) if self.derive_ter[s].s_append(r[i]): e = 1 break else: """ non-terminal""" if self.derive_ter.has_key(r[i]): if not self.derive_ter.has_key(s): self.derive_ter[s] = osets.Set([]) if self.derive_ter[s].s_extend(self.derive_ter[r[i]]) == 1: e = 1 if i > 0 and self.nullable[r[i]]: continue else: break class LRtable: """Class for construction of a C{LR} table @ivar gr: a context-free grammar @ivar operators: operators @ivar Log: Log report for LR table construction """ def 
__init__(self,cfgr,operators=None,noconflicts=1,expect=0): """ @param cfgr: a context-free grammar @param operators: operators @param noconflicts: if 0 LRtable conflicts are not resolved, unless for spcecial operator rules @type noconflicts: integer @param expect: exact number of expected LR shift/reduce conflicts @type expect: integer """ self.gr = cfgr self.gr.makeFFN() self.operators = operators self.precedence = None # if self.operators: self.rules_precedence() self.Log=LogLR(noconflicts,expect) self.make_action_goto() def make_action_goto(self): """ make C{action[i,X]} and C{goto[i,X]} All pairs C{(i,s)} not in action and goto dictionaries are 'error' """ c = self.items() if _DEBUG: print self.print_items(c) self.ACTION = {} self.GOTO = {} #shelve not working with osets #self.Log.items = c for i in range(len(c)): for item in c[i]: a = self.NextToDot(item) if a in self.gr.terminals: state = self.goto(c[i],a) try: j = c.index(state) self.add_action(i,a,'shift',j) except IndexError: if _DEBUG: print "no state" elif a == "": """ Dot at right end """ l = self.gr.rules[item[0]][0] if l != self.gr.aug_start : self.dotatend(item,i) else: """ last rule """ self.add_action(i,self.gr.endmark,'accept',[]) for s in self.gr.nonterminals: state = self.goto(c[i],s) try: j = c.index(state) self.GOTO[(i,s)] = j except ValueError: pass def rules_precedence(self): """Rule precedence obtained as the precedence of the right most terminal. """ self.precedence={} for i in range(len(self.gr.rules)): if len(self.gr.rules[i]) == 4: self.precedence[i] = self.gr.rules[i][3] else: self.precedence[i] = None if self.operators: self.gr.rules[i][1].reverse() for s in self.gr.rules[i][1]: if self.operators.has_key(s): self.precedence[i] = self.operators[s] break self.gr.rules[i][1].reverse() if _DEBUG: print "Precedence %s" %self.precedence def add_action(self,i,a,action,j): """Set C{(action,j)} for state C{i} and symbol C{a} or raise conflict error. 
Conficts are resolved using the following rules: - shift/reduce: if precedence/assoc information is available try to use it; otherwise conflict is resolved in favor of shift - reduce/reduce: choosing the production rule listed first """ if self.ACTION.has_key((i,a)) and self.ACTION[(i,a)] != (action,j): action1 , j1 = self.ACTION[(i,a)] if _DEBUG: print "LRconflit %s %s %s %s %s %s" %(action,j,action1,j1, i,a) if action1 == 'shift' and action == 'reduce': self.resolve_shift_reduce(i,a,j1,j) elif action == 'shift' and action1 == 'reduce': self.resolve_shift_reduce(i,a,j,j1) elif action == 'reduce' and action1 == 'reduce': if self.Log.noconflicts: # RESOLVED by choosing first rule if j > j1: self.ACTION[(i,a)] = (action,j1) else: self.ACTION[(i,a)] = (action,j) self.Log.add_conflict('rr',i,a,j1,j) else: raise LRConflictError(i,a) else: self.ACTION[(i,a)] = (action,j) def resolve_shift_reduce(self,i,a,s,r): """Operators precedence resolution or standard option: shift C{s}: rule for shift C{r}: rule for reduce """ try: if self.operators and self.operators.has_key(a) and self.precedence.has_key(r) and self.precedence[r]: prec_op, assoc_op = self.operators[a] if (self.precedence[r][0] > prec_op) or (self.precedence[r][0] == prec_op and self.precedence[r][1] =='left'): self.ACTION[(i,a)] = ('reduce',r) if _DEBUG: print "solved reduce %s" %r else: self.ACTION[(i,a)] = ('shift',s) if _DEBUG: print "solved shift %s" %s else: self.ACTION[(i,a)] = ('shift',s) if _DEBUG: print "solved shift %s" %s except (AttributeError, TypeError, KeyError,NameError): if self.Log.noconflicts: # choose to shift self.ACTION[(i,a)] = ('shift',s) if _DEBUG: print "choose shift %s for action (%s,%s)" %(s,i,a) self.Log.add_conflict('sr',i,a,s,r) if _DEBUG: print " %s for action (%s,%s)" %(self.Log.conflicts,i,a) else: raise LRConflictError(i,a) class SLRtable(LRtable): """Class for construction of a C{SLR} table C{SLR} items represented by a pair of integers C{(number of rule,position of dot)} (aho86:_compil page 221) """ def dotatend(self,item,i): n, k = item l = self.gr.rules[item[0]][0] for a in self.gr.follow[l]: self.add_action(i,a,'reduce',n) def closure(self,items): """The closure of a set of C{LR(0)} items C{I} is the set of items constructed from C{I} by the two rules: - every item of I is in closure(I) - If A -> s.Bt in closure(I) and B -> r, then add B ->.r to closure(I) (aho86:_compil page 223) """ added = {} for l in self.gr.nonterminals: added[l] = 0 close = items[:] e = 1 while e: e = 0 for i in close: s = self.NextToDot(i) if s in self.gr.nonterminals and added[s]==0 and self.gr.ntr.has_key(s): for n in self.gr.ntr[s]: close.append((n,0)) added[s] = 1 e = 1 return close def goto(self,items,s): """ goto(I,X) where I is a set of items and X a grammar symbol is the closure of the set of all items A -> sX.r such that A -> s.Xr is in I""" valid = osets.Set([]) for item in items: if self.NextToDot(item) == s: n, i = item valid.append((n, i + 1)) return self.closure(valid) def items(self): """ An LR(0) item of a grammar G is a production of G with a dot at some position on the right hand side. 
It is represented by the rule number and the position of the dot @return: a set of sets of items """ c = osets.Set([self.closure(osets.Set([(len(self.gr.rules) - 1,0)]))]) symbols = self.gr.terminals + self.gr.nonterminals e = 1 while e: e = 0 for i in c: for s in symbols: valid = self.goto(i,s) if valid != [] and valid not in c: c.append(valid) e = 1 return c def print_items(self,c): """Print SLR items """ s = "" j = 0 for i in c: s = s+ "I_%d: \n" %j for item in i: r, p = item lhs = self.gr.rules[r][0] rhs = self.gr.rules[r][1] s = s + "\t %s -> %s . %s \n" %(lhs, string.join(rhs[:p]," "), string.join(rhs[p:]," ")) j += 1 return s def NextToDot(self,item): """ returns symbol next to te dot or empty string""" n, i = item try: s = self.gr.rules[n][1][i] except IndexError: s = "" return s class LR1table(LRtable): """ Class for construction of a LR1 table Items are represented by a pair of integers (number of rule, position of dot) """ def closure(self,items): """The closure of a set of C{LR(1)} items C{I} is the set of items construted from I by the two rules: - every item of C{I} is in C{closure(I)} - If C{[A -> s.Bt,a]} in C{closure(I)},for C{B ->r} and each terminal C{b} in C{first(ta)}, add C{[B ->.r,b]} to C{closure(I)} """ close = items e = 1 while e: e = 0 for i in close: s = self.NextToDot(i) sa = self.gr.FIRST(self.AfterDot(i)) if s in self.gr.nonterminals and self.gr.ntr.has_key(s): for n in self.gr.ntr[s]: for b in sa: e = close.append((n,0,b)) return close def goto(self,items,s): """ goto(I,X) where I is a set of items and X a grammar symbol is the closure of the set of all items (A -> sX.r,a) such that (A -> s.Xr,a) in I""" valid = osets.Set([]) for item in items: if self.NextToDot(item) == s: n, i, t = item valid.append((n, i + 1,t)) return self.closure(valid) def items(self): """ An LR(1) item of a grammar G is a production of G with a dot at some position of the right hand side and a terminal: (rule_number,dot_position,terminal) (aho86:_compil page 231) """ c = osets.Set([ self.closure(osets.Set([(len(self.gr.rules) - 1,0,self.gr.endmark)]))]) symbols = self.gr.terminals + self.gr.nonterminals e = 1 while e: e = 0 for i in c: for s in symbols: valid=self.goto(i,s) if valid != [] : if c.s_append(valid): e = 1 return c def print_items(self,c): """Print C{LR(1)} items """ s = "" j = 0 for i in c: s = s+ "I_%d: \n" %j for item in i: r, p, t = item lhs = self.gr.rules[r][0] rhs = self.gr.rules[r][1] s = s + "\t %s -> %s . 
%s , %s\n" %(lhs, string.join(rhs[:p]," "), string.join(rhs[p:]," "),t) j += 1 print s return s def NextToDot(self,item): """ returns symbol next to the dot or empty string""" n, i, t = item try: s = self.gr.rules[n][1][i] except IndexError: s = "" return s def AfterDot(self,item): """ returns symbol next to the dot or empty string""" n, i, t = item try: s = self.gr.rules[n][1][i+1:] except IndexError: s = [] s.append(t) return s def dotatend(self,item,i): n, k, t = item self.add_action(i,t,'reduce',n) class LALRtable1(LRtable): """Class for construction of C{LALR(1)} tables""" def make_action_goto(self): """ Make C{action[i,X]} and C{goto[i,X]} all pairs C{(i,s)} not in action and goto dictionaries are 'error' """ self.gr.DERIVE_NT() c = self.items() if _DEBUG: print self.print_items(c) self.ACTION = {} self.GOTO = {} #shelve not working with osets #self.Log.items = c for i in range(len(c)): for item in c[i].keys(): a = self.NextToDot(item) if a in self.gr.terminals: state =self.goto(c[i],a) j = self.get_union(c,state) if j != -1: self.add_action(i,a,'shift',j) elif a == "": """ Dot at right end """ l = self.gr.rules[item[0]][0] if l != self.gr.aug_start : self.dotatend(item,c,i) else: """ last rule """ self.add_action(i,self.gr.endmark,'accept',[]) for s in self.gr.nonterminals: state = self.goto(c[i],s) j = self.get_union(c,state) if j != -1: self.GOTO[(i,s)] = j def items(self): """ An C{LALR(1)} item of a grammar C{G} is a production of C{G}with a dot at some position of the right hand side and a list of terminals: is coded as a dictonary with key C{(rule_number,dot_position)} and value a set of terminals """ i0 = {} i0[(len(self.gr.rules) - 1,0)] = osets.Set([self.gr.endmark]) c = osets.Set([self.closure(i0)]) symbols = self.gr.terminals + self.gr.nonterminals e = 1 while e: e = 0 for i in c: for s in symbols: if self.core_merge(c,self.goto(i,s)) == 1: e = 1 return c def print_items(self,c): """Print C{LALR(1)} items """ s = "" j = 0 for i in range(len(c)): s = s+ "I_%d: \n" %i for item in c[i].keys(): r, p = item lhs = self.gr.rules[r][0] rhs = self.gr.rules[r][1] s = s + "\t %s -> %s . 
%s, %s \n" %(lhs, string.join(rhs[:p]," "), string.join(rhs[p:]," "),c[i][item]) print s return s def goto(self,items,s): """ C{goto(I,X)} where C{I} is a set of items and C{X} a grammar symbol is the closure of the set of all items C{(A -> sX.r,a)} such that C{(A -> s.Xr,a)} in C{I}""" valid = {} for (n,i) in items.keys(): if self.NextToDot((n,i)) == s: if not valid.has_key((n,i+1)): valid[(n,i + 1)] = osets.Set([]) for t in items[(n,i)]: valid[(n, i + 1)].append(t) return self.closure(valid) def closure(self,items): """The closure of a set of C{LR(1)} items I is the set of items construted from I by the two rules: - every item of I is in closure(I) - If [A -> s.Bt,a] in closure(I),for B ->r and each terminal b in first(ta), add [B ->.r,b] to closure(I) """ e = 1 while e: e = 0 for i in items.keys(): s = self.NextToDot(i) if s in self.gr.nonterminals and self.gr.ntr.has_key(s): l = self.AfterDot(i,items) for n in self.gr.ntr[s]: if not items.has_key((n,0)): items[(n,0)] = osets.Set([]) if items[(n,0)].s_extend(l) == 1 : e = 1 return items def get_union(self,c,j): """ """ for i in c: if i.keys() == j.keys(): return c.index(i) return -1 def core_merge(self,c,j): """ """ if j == {} or j in c : return 0 e = 2 for i in c: if i.keys() == j.keys(): e = 0 for k in j.keys(): if i[k].s_extend(j[k]) == 1: e = 1 break if e == 2: e = c.s_append(j) return e def NextToDot(self,item): """ returns symbol next to the dot or empty string""" n, i = item try: s = self.gr.rules[n][1][i] except IndexError: s = "" return s def AfterDot(self,item,items): """ returns FIRST of strings after the dot concatenated with lookahead""" n, i = item try: s = self.gr.rules[n][1][i+1:] except IndexError: s = [] sa = osets.Set([]) for a in items[item]: s.append(a) sa.s_extend(self.gr.FIRST(s)) del s[len(s)-1] return sa def dotatend(self,item,c,i): n, k = item for a in c[i][item]: self.add_action(i,a,'reduce',n) class LALRtable(LALRtable1): """Class for construction of LALR tables """ def make_action_goto(self): """ collection of LR(0) items """ self.gr.DERIVE_T() self.gr.TransClose() c = self.items() if _DEBUG: print self.print_items(c) """ make action[i,X] and goto[i,X] all pairs (i,s) not in action and goto dictionaries are 'error' """ self.ACTION = {} self.GOTO = {} #shelve not working with osets #self.Log.items = c for i in range(len(c)): for item in c[i].keys(): C = self.NextToDot(item) if C in self.gr.nonterminals: if self.gr.derive_ter.has_key(C): for a in self.gr.derive_ter[C]: if self.goto_ref.has_key((i,a)): j = self.goto_ref[(i,a)] self.add_action(i,a,'shift',j) if self.gr.close_nt.has_key(C): for A in self.gr.close_nt[C].keys(): """Error: ignores end string s in C->*As""" for p in self.gr.close_nt[C][A]: r = self.AfterDotTer(item,c[i],p) if self.gr.ntr.has_key(A): for k in self.gr.ntr[A]: if self.gr.rules[k][1] == []: for a in r: self.add_action(i,a,'reduce',k) elif C in self.gr.terminals: if self.goto_ref.has_key((i,C)): j = self.goto_ref[(i,C)] self.add_action(i,C,'shift',j) else: """ Dot at right end """ l = self.gr.rules[item[0]][0] if l != self.gr.aug_start: self.dotatend(item,c,i) else: """ last rule """ self.add_action(i,self.gr.endmark,'accept',[]) for s in self.gr.nonterminals: state = self.goto(c[i],s) j = self.get_union(c,state) if j != -1: self.GOTO[(i,s)] = j def items(self): """ An C{LALR(1)} kernel item of a grammar C{G} is a production of C{G} with a dot at some position of the right hand side (except the first) and a list of terminals: is coded as a dictionary with key C{(rule_number,dot_position)} 
class LALRtable(LALRtable1):
    """Class for construction of LALR tables"""

    def make_action_goto(self):
        """Builds the collection of LR(0) items, then fills in
        C{action[i,X]} and C{goto[i,X]}; all pairs C{(i,s)} not in the
        action and goto dictionaries are 'error'."""
        self.gr.DERIVE_T()
        self.gr.TransClose()
        c = self.items()
        if _DEBUG:
            print self.print_items(c)
        self.ACTION = {}
        self.GOTO = {}
        # shelve not working with osets
        # self.Log.items = c
        for i in range(len(c)):
            for item in c[i].keys():
                C = self.NextToDot(item)
                if C in self.gr.nonterminals:
                    if self.gr.derive_ter.has_key(C):
                        for a in self.gr.derive_ter[C]:
                            if self.goto_ref.has_key((i, a)):
                                j = self.goto_ref[(i, a)]
                                self.add_action(i, a, 'shift', j)
                    if self.gr.close_nt.has_key(C):
                        for A in self.gr.close_nt[C].keys():
                            # Error: ignores end string s in C ->* As
                            for p in self.gr.close_nt[C][A]:
                                r = self.AfterDotTer(item, c[i], p)
                                if self.gr.ntr.has_key(A):
                                    for k in self.gr.ntr[A]:
                                        if self.gr.rules[k][1] == []:
                                            for a in r:
                                                self.add_action(i, a, 'reduce', k)
                elif C in self.gr.terminals:
                    if self.goto_ref.has_key((i, C)):
                        j = self.goto_ref[(i, C)]
                        self.add_action(i, C, 'shift', j)
                else:
                    # dot at right end
                    l = self.gr.rules[item[0]][0]
                    if l != self.gr.aug_start:
                        self.dotatend(item, c, i)
                    else:
                        # last rule
                        self.add_action(i, self.gr.endmark, 'accept', [])
            for s in self.gr.nonterminals:
                state = self.goto(c[i], s)
                j = self.get_union(c, state)
                if j != -1:
                    self.GOTO[(i, s)] = j

    def items(self):
        """An C{LALR(1)} kernel item of a grammar C{G} is a production of
        C{G} with a dot at some position of the right hand side (except the
        first) and a list of terminals: it is coded as a dictionary with
        key C{(rule_number,dot_position)} and value a set of terminals."""
        i0 = {}
        i0[(len(self.gr.rules) - 1, 0)] = osets.Set([self.gr.endmark])
        c = osets.Set([i0])
        symbols = self.gr.terminals + self.gr.nonterminals
        # kernel LR(0) items
        self.goto_ref = {}
        e = 1
        while e:
            e = 0
            for i in c:
                for s in symbols:
                    valid = self.goto(i, s)
                    if valid != {}:
                        if c.s_append(valid):
                            e = 1
                        self.goto_ref[(c.index(i), s)] = c.index(valid)
        # discovering propagated and spontaneous lookaheads for kernel
        # items k and grammar symbol s
        lh = {}
        for k in c:
            nk = c.index(k)
            lh[nk] = {}  # osets.Set([])
            for (n, i) in k.keys():
                lh[nk][(n, i)] = osets.Set([])
                j = {}
                j[(n, i)] = osets.Set([(self.gr.dummy)])
                j = self.closure(j)
                for s in symbols:
                    for (m1, j1) in j.keys():
                        if self.NextToDot((m1, j1)) == s:
                            for a in j[(m1, j1)]:
                                if a == self.gr.dummy:
                                    lh[nk][(n, i)].append((self.goto_ref[(nk, s)], m1, j1+1))
                                else:
                                    c[self.goto_ref[(nk, s)]][(m1, j1+1)].append(a)
                del j
        # propagate lookaheads
        # c[0][(len(self.gr.rules) - 1, 0)].s_append(self.gr.endmark)
        e = 1
        while e:
            e = 0
            for k in c:
                nk = c.index(k)
                for (n, i) in k.keys():
                    for (m, n1, i1) in lh[nk][(n, i)]:
                        if c[m][(n1, i1)].s_extend(k[(n, i)]) == 1:
                            e = 1
        return c

    def goto(self, items, s):
        """C{goto(I,X)} where I is a set of kernel items and X a grammar
        symbol is the closure of the set of all items (A -> sX.r,a) such
        that (A -> s.Xr,a) is in I"""
        valid = {}
        for (n, i) in items.keys():
            x = self.NextToDot((n, i))
            if x == s:
                if not valid.has_key((n, i + 1)):
                    valid[(n, i + 1)] = osets.Set([])
            if self.gr.close_nt.has_key(x):
                for a in self.gr.close_nt[x].keys():
                    if self.gr.ntr.has_key(a):
                        for k in self.gr.ntr[a]:
                            if self.gr.rules[k][1] != [] and self.gr.rules[k][1][0] == s:
                                valid[(k, 1)] = osets.Set([])
        return valid

    def NextToDot(self, item):
        """Returns the symbol next to the dot, or the empty string."""
        n, i = item
        try:
            s = self.gr.rules[n][1][i]
        except IndexError:
            s = ""
        return s

    def AfterDotTer(self, item, items, path):
        """Returns FIRST of the string after the dot concatenated with the
        lookahead."""
        if path:
            p, n = path
            if not n:
                return p
        l, i = item
        try:
            f = self.gr.FIRST(self.gr.rules[l][1][i+1:])
            ns = self.gr.nullable[string.join(self.gr.rules[l][1][i+1:])]
        except IndexError:
            f = []
            ns = 1
        if ns:
            return items[item]
        else:
            return f


class LogLR:
    """Class for LR table construction report:

    @ivar expect: number of shift/reduce conflicts expected
    @type expect: integer
    @ivar items: set of LR items
    @ivar conflicts: dictionary of conflicts that occurred in LR table
    construction: 'rr' and 'sr'
    """
    def __init__(self, noconflicts, expect):
        self.noconflicts = noconflicts
        self.expect = expect
        self.conflicts = {}
        self.items = None

    def add_conflict(self, type, i, a, value1, value2):
        try:
            self.conflicts[type].append((i, a, value1, value2))
        except KeyError:
            self.conflicts[type] = [(i, a, value1, value2)]
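# Illustration (not part of the original module): how a table builder
# records a shift/reduce conflict in LogLR. The state number, lookahead
# and the two competing actions below are made-up values.
def _loglr_sketch():
    log = LogLR(noconflicts=1, expect=0)
    log.add_conflict('sr', 3, '+', ('shift', 7), ('reduce', 2))
    # log.conflicts == {'sr': [(3, '+', ('shift', 7), ('reduce', 2))]}
    return log.conflicts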
class LRparser:
    """Class for LR parser

    @ivar cfgr: context free grammar
    @ivar rules: grammar rules
    @ivar terminals: grammar terminals
    @ivar nonterminals: grammar nonterminals
    @ivar table: LR parsing table
    @ivar ACTION: Action function
    @ivar GOTO: Goto function
    @ivar tokens: tokens to be parsed
    @ivar context: computational context
    @ivar output: list of grammar rules used for parsing C{tokens}
    (right derivation in reverse)
    @ivar stack: LR stack with pairs C{(state,token)}
    """
    def __init__(self, grammar, table_shelve, no_table=1, tabletype=LALRtable,
                 operators=None, noconflicts=1, expect=0, **args):
        """
        @param grammar: a list of productions; each production is a tuple
        C{(LeftHandside,RightHandside,SemFunc,Prec?)} with C{LeftHandside}
        nonterminal, C{RightHandside} list of symbols, C{SemFunc}
        syntax-directed semantics and, if present, C{Prec (PRECEDENCE,ASSOC)}
        for ambiguous rules. The first production is for the start symbol.
        @param table_shelve: file where the parser is saved
        @type table_shelve: string
        @param tabletype: type of LR table: C{SLR}, C{LR1}, C{LALR}
        @type tabletype: LRtable class
        @param no_table: if 0, table_shelve is created anyway
        @type no_table: integer
        @param operators: precedence and associativity for operators
        @type operators: dictionary
        @param noconflicts: if 0, LRtable conflicts are not resolved,
        unless special operator rules apply
        @type noconflicts: integer
        @param expect: exact number of expected LR shift/reduce conflicts
        @type expect: integer
        @param args: extra arguments; if key C{nosemrules} is 1, no
        semantic rules are applied
        @type args: dictionary
        """
        self.cfgr = CFGrammar(grammar)
        self.rules = self.cfgr.rules
        self.terminals = self.cfgr.terminals
        self.nonterminals = self.cfgr.nonterminals
        self.endmark = self.cfgr.endmark
        if args.has_key('nosemrules'):
            self.nosemrules = args['nosemrules']
        else:
            self.nosemrules = 0
        d = shelve.open(table_shelve)
        if d and no_table:
            self.ACTION = d['action']
            self.GOTO = d['goto']
            if d.has_key('version'):
                if d['version'] < _Version:
                    raise TableError(table_shelve)
            try:
                self.Log = d['log']
            except KeyError:
                raise TableError(table_shelve)
        else:
            self.table = tabletype(self.cfgr, operators, noconflicts, expect)
            d['version'] = _Version
            d['action'] = self.ACTION = self.table.ACTION
            d['goto'] = self.GOTO = self.table.GOTO
            d['log'] = self.Log = self.table.Log
        d.close()

    def __str__(self):
        """@return: the LR parsing table showing for each state the action
        and goto function"""
        l = map(lambda x: x[0], self.ACTION.keys())
        l.sort()
        a1 = "\nState\n"
        if len(self.terminals) < 20:
            for a in self.terminals:
                a1 = a1 + " \t%s" % a
            for i in osets.Set(l):
                a3 = "\n%s" % i
                for a in self.terminals:
                    if self.ACTION.has_key((i, a)):
                        if self.ACTION[i, a][0] == "shift":
                            x = "s"
                        else:
                            x = "r"
                        a2 = "\t%s%s" % (x, self.ACTION[i, a][1])
                    else:
                        a2 = "\t"
                    a3 = a3 + a2
                a1 = "%s%s" % (a1, a3)
            ac = a1
        else:
            for i in osets.Set(l):
                a3 = "%s\n" % i
                for a in self.terminals:
                    if self.ACTION.has_key((i, a)):
                        if self.ACTION[i, a][0] == "shift":
                            x = "s"
                        else:
                            x = "r"
                        a3 = a3 + "%s = %s%s\n" % (a, x, self.ACTION[i, a][1])
                a1 = "%s%s" % (a1, a3)
            ac = a1
        l = map(lambda x: x[0], self.GOTO.keys())
        l.sort()
        a1 = "\nState\n"
        if len(self.nonterminals) < 20:
            for a in self.nonterminals:
                a1 = a1 + " \t%s" % a
            for i in osets.Set(l):
                a3 = "\n%s" % i
                for a in self.nonterminals:
                    if self.GOTO.has_key((i, a)):
                        a2 = "\t%s" % self.GOTO[(i, a)]
                    else:
                        a2 = "\t"
                    a3 = a3 + a2
                a1 = "%s%s" % (a1, a3)
        else:
            for i in osets.Set(l):
                a3 = "%s\n" % i
                for a in self.nonterminals:
                    if self.GOTO.has_key((i, a)):
                        a3 = a3 + "%s = %s\n" % (a, self.GOTO[(i, a)])
                a1 = "%s%s" % (a1, a3)
        go = a1
        return "Action table:\n %s\n Goto table:%s\n" % (ac, go)
    def parsing(self, tokens, context=None):
        """LR Parsing Algorithm (aho86:_compil, page 218)
        @param tokens: pairs (TOKEN, SPECIAL_VALUE)
        @param context: a computational context for semantic actions
        @return: parsed result"""
        self.stack = Stack()
        self.stack.push((0, []))
        self.tokens = tokens
        self.tokens.append((self.endmark, self.endmark))
        self.context = context
        self.output = []
        self.ip = 0
        while 1:
            s = self.stack.top()[0]
            a = self.tokens[self.ip][0]
            if _DEBUG:
                print "Input: %s\nState: %s" % (map(lambda x: x[0], self.tokens[self.ip:]), s)
                print "Stack: %s" % self.stack
            try:
                if self.ACTION[s, a][0] == 'shift':
                    if _DEBUG:
                        print "Action: shift\n"
                    self.stack.push((self.ACTION[s, a][1], self.tokens[self.ip][1]))
                    self.ip = self.ip + 1
                elif self.ACTION[s, a][0] == 'reduce':
                    n = self.ACTION[s, a][1]
                    if _DEBUG:
                        print "Action: reduce %s %s\n" % (n, str(self.rules[n]))
                    semargs = [self.stack.pop()[1] for i in range(len(self.rules[n][1]))]
                    semargs.reverse()
                    if self.nosemrules:
                        reduce = []
                    else:
                        reduce = Reduction(self.rules[n][2], semargs, self.context)
                    del semargs
                    s1 = self.stack.top()[0]
                    a = self.rules[n][0]
                    self.stack.push((self.GOTO[s1, a], reduce))
                    self.output.append(n)
                elif self.ACTION[s, a] == ('accept', []):
                    break
                else:
                    raise LRParserError(s, a)
            except KeyError:
                if _DEBUG:
                    print "Error in action: %s" % self.ACTION
                raise LRParserError(s, a)
            except SemanticError, m:
                if _DEBUG:
                    print "Semantic Rule %d %s" % (n, self.rules[n][2])
                raise SemanticError(m, n, self.rules[n][2])
        return self.stack.top()[1]

    def parse_grammar(self, st, context, args):
        """Transforms a string into a grammar description.
        @param st: a string representing the grammar rules, with default
        symbols as below. First rule is for the start symbol. I{Example}::
            reg -> reg + reg E{lb}E{lb} self.OrSemRule E{rb}E{rb} // priority 'left'
                 | ( reg ) E{lb}E{lb}self.ParSemRuleE{rb}E{rb} ;
        where:
          - rulesym="->" production symbol
          - rhssep='' RHS symbols separator
          - opsym='//' operator definition separator
          - semsym=E{lb}E{lb} semantic rule start marker
          - csemsym=E{rb}E{rb} semantic rule end marker
          - rulesep='|' separator for multiple rules for a LHS
          - ruleend=';' end marker for one LHS rule"""
        self.pg = Yappy_grammar(**args)
        self.pg.input(st, context)
        return self.pg.context['rules']

    def gsrules(self, rulestr, **sym):
        """Transforms a string into a grammar description.
        @param rulestr: a string representing the grammar rules, with
        default symbols as below.
        @param sym: dictionary with the symbols used. Default ones:
          - rulesym="->" production symbol
          - rhssep='' RHS symbols separator
          - opsym='//' operator definition separator
          - semsym=E{lb}E{lb} semantic rule start marker
          - csemsym=E{rb}E{rb} semantic rule end marker
          - rulesep='|' separator for multiple rules for a LHS
          - ruleend=';' end marker for one LHS rule
        Example::
            reg -> reg + reg E{lb}E{lb} self.OrSemRule // (priority,'left') E{rb}E{rb}
                 | ( reg ) E{lb}E{lb}self.ParSemRuleE{rb}E{rb} ;
        """
        if not sym:
            sym = Dict(rulesym="->", rhssep='', opsym='//', semsym='{{',
                       csemsym='}}', rulesep='|', ruleend=';')
        gr = []
        rl = string.split(rulestr, sym['ruleend'])
        for l in rl:
            m = re.compile(sym['rulesym']).search(l)
            if not m:
                continue
            if m.start() == 0:
                raise GrammarError(l)
            else:
                lhs = l[0:m.start()].strip()
            if m.end() == len(l):
                raise GrammarError(l)
            else:
                rhss = string.strip(l[m.end():])
            if rhss == "[]":
                gr.append((lhs, [], EmptySemRule))
            else:
                rhss = string.split(l[m.end():], sym['rulesep'])
                for rest in rhss:
                    rest = string.strip(rest)
                    if rest == "[]":
                        rhs = []
                        sem = 'EmptySemRule'
                        op = None
                    else:
                        m = re.search(sym['semsym'] + '(?P<opsem>.*)' + sym['csemsym'], rest)
                        if not m:
                            rhs = string.split(rest, None)
                            sem = 'DefaultSemRule'
                            op = None
                        else:
                            if m.start() == 0:
                                raise GrammarError(rest)
                            else:
                                rhs = string.split(rest[0:m.start()].strip())
                            if m.group('opsem'):
                                opsem = string.split(m.group('opsem'), sym['opsym'])
                                if len(opsem) == 1:
                                    sem = string.strip(opsem[0])
                                    op = None
                                elif len(opsem) == 2:
                                    sem = string.strip(opsem[0])
                                    op = string.strip(opsem[1])
                                else:
                                    raise GrammarError(rest)
                            else:
                                sem = 'DefaultSemRule'
                                op = None
                    if op == None:
                        gr.append((lhs, rhs, eval(sem)))
                    else:
                        gr.append((lhs, rhs, eval(sem), eval(op)))
        return gr
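# Illustration (not part of the original module): the input expected by
# LRparser.parsing is a list of (TOKEN, SEMANTIC_VALUE) pairs, normally
# produced by Lexer.scan; parsing appends the endmark pair itself. The
# grammar and table file name below are made up for this sketch:
#
#   g = grules([("S -> a S", DefaultSemRule), ("S -> a", DefaultSemRule)])
#   p = LRparser(g, 'S.tab', no_table=0)      # no_table=0 forces a rebuild
#   p.parsing([("a", "a"), ("a", "a")])
#   p.output   # rule numbers of the rightmost derivation, in reverse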
class LRBuildparser:
    """Class for LR parser: without shelve and semantic rules (obsolete)"""
    def __init__(self, grammar):
        self.table = LALRtable(grammar)

    def parsing(self, tokens):
        """LR Parsing Algorithm"""
        self.stack = Stack()
        self.stack.push(0)
        self.input = tokens
        self.input.append(self.table.gr.endmark)
        self.output = []
        self.ip = 0
        while 1:
            s = self.stack.top()
            a = self.input[self.ip]
            if not self.table.ACTION.has_key((s, a)):
                raise LRParserError(s, a)
            elif self.table.ACTION[s, a][0] == 'shift':
                # self.stack.push(a)
                self.stack.push(self.table.ACTION[s, a][1])
                self.ip = self.ip + 1
            elif self.table.ACTION[s, a][0] == 'reduce':
                n = self.table.ACTION[s, a][1]
                for i in range(len(self.table.gr.rules[n][1])):
                    self.stack.pop()
                s1 = self.stack.top()
                a = self.table.gr.rules[n][0]
                # self.stack.push(a)
                if not self.table.GOTO.has_key((s1, a)):
                    raise LRParserError(s1, a)
                else:
                    self.stack.push(self.table.GOTO[s1, a])
                self.output.append(n)
            elif self.table.ACTION[s, a] == ('accept', []):
                break
            else:
                raise LRParserError()


############# Auxiliaries ##################

def Dict(**entries):
    """Create a dict out of the argument=value arguments"""
    return entries


def grules(rules_list, rulesym="->", rhssep=None):
    """Transforms a list of rules into a grammar description. If a rule
    has no semantic rules, C{DefaultSemRule} is assumed.
    @param rules_list: a list of pairs (rule, sem) where rule is a string
    of the form:
      - Word rulesym Word1 ... Word2
      - Word rulesym []
    @param rulesym: LHS and RHS rule separator
    @param rhssep: RHS values separator (None for white chars)
    @return: a grammar description"""
    gr = []
    sep = re.compile(rulesym)
    for r in rules_list:
        if type(r) is StringType:
            rule = r
        else:
            rule = r[0]
        m = sep.search(rule)
        if not m:
            continue
        if m.start() == 0:
            raise GrammarError(rule)
        else:
            lhs = rule[0:m.start()].strip()
        if m.end() == len(rule):
            raise GrammarError(rule)
        else:
            rest = string.strip(rule[m.end():])
        if rest == "[]":
            rhs = []
        else:
            rhs = string.split(rest, rhssep)
        if type(r) is StringType:
            gr.append((lhs, rhs, DefaultSemRule))
        elif len(r) == 3:
            gr.append((lhs, rhs, r[1], r[2]))
        elif len(r) == 2:
            gr.append((lhs, rhs, r[1]))
        else:
            raise GrammarError(r)
    return gr
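# Illustration (not part of the original module): what grules produces.
# A plain string rule gets DefaultSemRule; a (rule, sem) pair keeps the
# given semantic action. The two rules below are made up for the sketch.
def _grules_sketch():
    g = grules(["S -> a S", ("S -> []", EmptySemRule)])
    # g == [('S', ['a', 'S'], DefaultSemRule), ('S', [], EmptySemRule)]
    return g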
#######################################################

class Yappy(LRparser):
    """A basic class for parsing.

    @ivar lex: a Lexer object"""
    def __init__(self, tokenize, grammar, table='YappyTab', no_table=1,
                 tabletype=LALRtable, noconflicts=1, expect=0, **args):
        """@param tokenize: same as for L{Lexer}
        @param grammar: if a string, C{parse_grammar} is called
        @param table: and no_table, tabletype same as for L{LRparser}
        @param args: dictionary where
          - key C{tmpdir} is the directory where the parse table used by
            the Yappy Grammar is stored;
          - key C{usrdir} is the directory where the user tables are stored
          - key C{nosemrules} if 1 semantic actions are not applied
        """
        self.lex = Lexer(tokenize)
        operators = None
        if self.lex.__dict__.has_key("operators"):
            operators = self.lex.operators
        if type(grammar) is StringType:
            grammar = self.parse_grammar(grammar, {'locals': locals()}, args)
        if args.has_key('usrdir') and os.path.isdir(args['usrdir']):
            table = string.rstrip(args['usrdir']) + '/' + table
        if os.path.dirname(table) == "" or os.path.exists(os.path.dirname(table)):
            LRparser.__init__(self, grammar, table, no_table, tabletype,
                              operators, noconflicts, expect, **args)
        else:
            sys.stderr.write("Directory %s does not exist\n" % table)
            sys.exit()
        if (self.Log.noconflicts and
            ((self.Log.conflicts.has_key('sr') and
              len(self.Log.conflicts['sr']) != self.Log.expect) or
             self.Log.conflicts.has_key('rr'))):
            print "LR conflicts: number %s value %s" % (len(self.Log.conflicts['sr']), self.Log.conflicts)
            print """If it is Ok, set expect to the number of conflicts and build table again"""

    def input(self, str=None, context={}, lexer=0):
        """Reads from stdin or a string and returns the parsed result.
        @param str: string to be parsed. If not given, reads from C{stdin}.
        @param context: some initial computational context
        @param lexer: if 1, only lexical analysis is performed
        @return: a tuple C{(parsed result, context)} or only the
        C{parsed result}
        """
        if str:
            self.tokens = self.lex.scan(str)
        else:
            print "Input: ",
            self.tokens = self.lex.readscan()
        if lexer:
            return self.tokens
        self.context = context
        return self.parsing(self.tokens, self.context)

    def inputfile(self, FileName, context={}):
        """Reads input from a file"""
        try:
            file = open(FileName, "r")
        except IOError:
            raise YappyError()
        return self.input(file.read(), context)

    def parse_tree(self):
        """To be defined using output"""
        pass

    def test(self):
        """A test for each class"""
        pass


######### Semantic Grammar Rules ##############

def expandSemRule(strargs, strfun):
    # replaces each positional argument $n in strfun by strargs+"n]",
    # e.g. expandSemRule("sargs[", "$0+$2") == "sargs[0]+sargs[2]"
    regargs = re.compile(r'\$(\d+)')
    matchargs = regargs.finditer(strfun)
    for i in [(x.group(0), strargs + x.group(1) + "]") for x in matchargs]:
        strfun = string.replace(strfun, i[0], i[1])
    return strfun


def Reduction(fun, sargs, context={}):
    """Reduction function for semantic rules:
      - C{fun} can be:
        - a function
        - or a string with positional arguments C{$n} that is expanded
          and evaluated with C{eval}
    """
    if callable(fun):
        return apply(fun, [sargs, context])
    elif type(fun) is StringType:
        a = expandSemRule("sargs[", fun)
        l = context.get('locals', {})
        l.update(locals())
        return eval(a, context.get('globals', {}), l)
    else:
        raise SemanticError, 'Wrong type: %s' % fun


def DefaultSemRule(sargs, context={}):
    """Default semantic rule"""
    return sargs[0]


def EmptySemRule(sargs, context={}):
    return []
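# Illustration (not part of the original module): positional semantic
# rules. expandSemRule rewrites each $n into an index on sargs, and
# Reduction evaluates the resulting expression over the semantic values.
def _reduction_sketch():
    e = expandSemRule("sargs[", "$0 + $2")
    # e == "sargs[0] + sargs[2]"
    return Reduction("$0 + $2", [1, "+", 2], {})   # == 3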
""" def __init__(self,no_table=1, table='yappypar.tab',tabletype=LR1table,**args): grammar= grules([ ("G -> RULE G",self.GRule), ("G -> []",EmptySemRule), ("RULE -> ID rulesym MULTI ruleend",self.RULERule) , ("MULTI -> RHS rulesep MULTI",self.MULTIRule), ("MULTI -> RHS",self.MULTIRule), ("RHS -> []",EmptySemRule), #RHS->OPSEM not allowed; epsilon-rule ("RHS -> RH OPSEM",self.RHSRule), ("RH -> ID RH",self.RHRule), ("RH -> ID",self.RHRule), ("OPSEM -> []",self.OPSEMRule), # ("OPSEM -> semsym ID csemsym",self.OPSEMRule),#OPSEM->OP not allowed # ("OPSEM -> semsym ID OP csemsym",self.OPSEMRule), ("OPSEM -> IDS",self.OPSEMRule1), ("OPSEM -> IDS OP",self.OPSEMRule1), ("OP -> opsym OPV",self.OPRule), ("OPV -> ID ID ", self.OPVRule) ]) tokenize = [ ("\{\{.*\}\}",lambda x: ("IDS",string.strip(x[2:-2]))), ("\s+",""), ("->",lambda x: ("rulesym",x)), ("\|",lambda x: ("rulesep",x)), (";",lambda x: ("ruleend",x)), # ("}}",lambda x: ("csemsym",x)), # ("{{",lambda x: ("semsym",x)), ("//",lambda x: ("opsym",x)), (".*",lambda x: ("ID",x))] if args.has_key('tmpdir'): args1 = {'usrdir':string.rstrip(args['tmpdir'],'/')} else: args1 = {} Yappy.__init__(self,tokenize,grammar,table,no_table,**args1) def OPVRule(self,arg,context): """ """ try: int(arg[0]) except ValueError: raise SemanticError("Precedence must be an integer: %s given" %arg[0]) if arg[1]!= 'left' and arg[1]!= 'right' and arg[1]!= 'noassoc': raise SemanticError("Associativity must be 'left' or 'right' or 'noassoc': %s\ given" %arg[1]) return (int(arg[0]),arg[1]) def OPRule(self,arg,context): return arg[1] def OPSEMRule(self,arg,context): if len(arg) == 4: return (arg[1],arg[2]) if len(arg) == 3: return arg[1] if len(arg) == 0: return 'DefaultSemRule' def OPSEMRule1(self,arg,context): if len(arg) == 2: return (arg[0],arg[1]) if len(arg) == 1: return arg[0] if len(arg) == 0: return 'DefaultSemRule' def RHRule(self,arg,context): if len(arg) == 1: return [arg[0]] if len(arg) == 2: return [arg[0]]+arg[1] def RHSRule(self,arg,context): return (arg[0],arg[1]) def MULTIRule(self,arg,context): if len(arg) == 1: return [arg[0]] else: return [arg[0]]+arg[2] def RULERule(self,arg,context): lhs=arg[0] def grule(self,l): if l == []: return (lhs,[],EmptySemRule) if type(l[1]) is TupleType: return (lhs,l[0],eval(l[1][0],globals(),context['locals']),l[1][1]) else: return (lhs,l[0],eval(l[1],globals(),context['locals'])) return map(lambda l:grule(self,l) ,arg[2]) def GRule(self,args,context): if context.has_key('rules'): context['rules']= args[0]+context['rules'] else: context['rules'] = args[0] return [] def test(self): st = """ reg -> reg + reg {{DefaultSemRule}} // 200 left | reg reg {{DefaultSemRule}} // 200 left | reg * {{DefaultSemRule}} | ( reg ) {{DefaultSemRule}} | id {{lambda l,c:l[0]}}; reg -> ; a -> reg | reg ; """ st1 = """ reg -> reg + reg {{DefaultSemRule // 200 left}} | reg reg {{DefaultSemRule // 200 left}} | reg * {{DefaultSemRule}} | ( reg ) {{DefaultSemRule}} | id {{DefaultSemRule}}; reg -> ; a -> reg | reg ; """ self.input(st,{'locals':locals()}) return self.context['rules'] class Stack: """ A simple class to implement stacks""" def __init__(self, start=[]): """Reverse initial stack objects""" self.stack = [] for x in start: self.push(x) self.stack.reverse() def push(self, object): self.stack = [object] + self.stack def pop(self): if not self.stack: raise 'stack underflow' top, self.stack = self.stack[0], self.stack[1:] return top def top(self): """ Returns top of stack (not poping it)""" if not self.stack: raise 'stack underflow' return 
class Stack:
    """A simple class to implement stacks"""
    def __init__(self, start=[]):
        """Reverse initial stack objects"""
        self.stack = []
        for x in start:
            self.push(x)
        self.stack.reverse()

    def push(self, object):
        self.stack = [object] + self.stack

    def pop(self):
        if not self.stack:
            raise 'stack underflow'
        top, self.stack = self.stack[0], self.stack[1:]
        return top

    def top(self):
        """Returns the top of the stack (without popping it)"""
        if not self.stack:
            raise 'stack underflow'
        return self.stack[0]

    def empty(self):
        """Tests if the stack is empty"""
        return not self.stack

    def popall(self):
        """Empties the stack"""
        self.stack = []

    def __repr__(self):
        return '[Stack:%s]' % self.stack

    def __cmp__(self, other):
        return cmp(self.stack, other.stack)

    def __len__(self):
        return len(self.stack)

    def __add__(self, other):
        return Stack(self.stack + other.stack)

    def __mul__(self, reps):
        return Stack(self.stack * reps)

    def __getitem__(self, offset):
        return self.stack[offset]

    def __getslice__(self, low, high):
        return Stack(self.stack[low:high])

    def __getattr__(self, name):
        return getattr(self.stack, name)
yappy-1.9.4/yappyDoc.pdf0000640000076600000240000025136511404125044013456 0ustar dialout
[binary PDF content omitted]