fixes/fix_apply.pyc, fixes/fix_apply.pyo (compiled bytecode only; recoverable docstring: "Fixer for apply(). This converts apply(func, v, k) into (func)(*v, **k).")

fixes/fix_asserts.py
"""Fixer that replaces deprecated unittest method names."""

# Author: Ezio Melotti

from ..fixer_base import BaseFix
from ..fixer_util import Name

NAMES = dict(
    assert_="assertTrue",
    assertEquals="assertEqual",
    assertNotEquals="assertNotEqual",
    assertAlmostEquals="assertAlmostEqual",
    assertNotAlmostEquals="assertNotAlmostEqual",
    assertRegexpMatches="assertRegex",
    assertRaisesRegexp="assertRaisesRegex",
    failUnlessEqual="assertEqual",
    failIfEqual="assertNotEqual",
    failUnlessAlmostEqual="assertAlmostEqual",
    failIfAlmostEqual="assertNotAlmostEqual",
    failUnless="assertTrue",
    failUnlessRaises="assertRaises",
    failIf="assertFalse",
)


class FixAsserts(BaseFix):

    PATTERN = """
    power< any+ trailer< '.' meth=(%s)> any* >
    """ % '|'.join(map(repr, NAMES))

    def transform(self, node, results):
        name = results["meth"][0]
        name.replace(Name(NAMES[str(name)], prefix=name.prefix))
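A minimal, runnable sketch of exercising the FixAsserts fixer above through lib2to3's public refactoring entry point; the input snippet and the "<example>" label are invented for illustration:

from lib2to3.refactor import RefactoringTool

# Load just this fixer by its dotted module path.
tool = RefactoringTool(["lib2to3.fixes.fix_asserts"])
# refactor_string() expects unicode source ending in a newline.
src = u"self.assertEquals(spam, eggs)\nself.failUnless(ham)\n"
tree = tool.refactor_string(src, "<example>")
print(tree)
# Expected output:
#   self.assertEqual(spam, eggs)
#   self.assertTrue(ham)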
fixes/fix_basestring.pyc (compiled bytecode only; recoverable docstring: "Fixer for basestring -> str.")

fixes/fix_buffer.pyc (compiled bytecode only; recoverable docstring: "Fixer that changes buffer(...) into memoryview(...).")

fixes/fix_dict.pyo (compiled bytecode only; recoverable docstring follows)
"""Fixer for dict methods.

d.keys() -> list(d.keys())
d.items() -> list(d.items())
d.values() -> list(d.values())

d.iterkeys() -> iter(d.keys())
d.iteritems() -> iter(d.items())
d.itervalues() -> iter(d.values())

d.viewkeys() -> d.keys()
d.viewitems() -> d.items()
d.viewvalues() -> d.values()

Except in certain very specific contexts: the iter() can be dropped
when the context is list(), sorted(), iter() or for...in; the list()
can be dropped when the context is list() or sorted() (but not iter()
or for...in!). Special contexts that apply to both: list(), sorted(),
tuple(), set(), any(), all(), sum().

Note: iter(d.keys()) could be written as iter(d) but since the
original d.iterkeys() was also redundant we don't fix this. And there
are (rare) contexts where it makes a difference (e.g. when passing it
as an argument to a function that introspects the argument).
"""

fixes/fix_except.py
"""Fixer for except statements with named exceptions.

The following cases will be converted:

- "except E, T:" where T is a name:

    except E as T:

- "except E, T:" where T is not a name, tuple or list:

        except E as t:
            T = t

    This is done because the target of an "except" clause must be a
    name.

- "except E, T:" where T is a tuple or list literal:

        except E as t:
            T = t.args
"""
# Author: Collin Winter

# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Assign, Attr, Name, is_tuple, is_list, syms

def find_excepts(nodes):
    for i, n in enumerate(nodes):
        if n.type == syms.except_clause:
            if n.children[0].value == u'except':
                yield (n, nodes[i+2])

class FixExcept(fixer_base.BaseFix):
    BM_compatible = True

    PATTERN = """
    try_stmt< 'try' ':' (simple_stmt | suite)
              cleanup=(except_clause ':' (simple_stmt | suite))+
              tail=(['except' ':' (simple_stmt | suite)]
                    ['else' ':' (simple_stmt | suite)]
                    ['finally' ':' (simple_stmt | suite)]) >
    """

    def transform(self, node, results):
        syms = self.syms

        tail = [n.clone() for n in results["tail"]]

        try_cleanup = [ch.clone() for ch in results["cleanup"]]
        for except_clause, e_suite in find_excepts(try_cleanup):
            if len(except_clause.children) == 4:
                (E, comma, N) = except_clause.children[1:4]
                comma.replace(Name(u"as", prefix=u" "))

                if N.type != token.NAME:
                    # Generate a new N for the except clause
                    new_N = Name(self.new_name(), prefix=u" ")
                    target = N.clone()
                    target.prefix = u""
                    N.replace(new_N)
                    new_N = new_N.clone()

                    # Insert "old_N = new_N" as the first statement in
                    #  the except body. This loop skips leading whitespace
                    #  and indents
                    #TODO(cwinter) suite-cleanup
                    suite_stmts = e_suite.children
                    for i, stmt in enumerate(suite_stmts):
                        if isinstance(stmt, pytree.Node):
                            break

                    # The assignment is different if old_N is a tuple or list
                    # In that case, the assignment is old_N = new_N.args
                    if is_tuple(N) or is_list(N):
                        assign = Assign(target, Attr(new_N, Name(u'args')))
                    else:
                        assign = Assign(target, new_N)

                    #TODO(cwinter) stopgap until children becomes a smart list
                    for child in reversed(suite_stmts[:i]):
                        e_suite.insert_child(0, child)
                    e_suite.insert_child(i, assign)
                elif N.prefix == u"":
                    # No space after a comma is legal; no space after "as",
                    # not so much.
                    N.prefix = u" "

        #TODO(cwinter) fix this when children becomes a smart list
        children = [c.clone() for c in node.children[:3]] + try_cleanup + tail
        return pytree.Node(node.type, children)
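An illustrative before/after sketch of FixExcept's output; the exception and variable names are invented, and "xxx_todo_changeme" is the assumed default placeholder produced by self.new_name():

# Before (Python 2):
#     except IOError, err:
#         handle(err)
# After:
#     except IOError as err:
#         handle(err)
#
# A tuple target cannot follow "as", so a temporary is introduced
# and unpacked from .args, per the docstring above:
# Before:
#     except IOError, (errno, strerror):
#         print(strerror)
# After:
#     except IOError as xxx_todo_changeme:
#         (errno, strerror) = xxx_todo_changeme.args
#         print(strerror)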
fixes/fix_exec.pyo (compiled bytecode only; recoverable docstring: "Fixer for exec. This converts usages of the exec statement into calls to a built-in exec() function. exec code in ns1, ns2 -> exec(code, ns1, ns2)")

fixes/fix_execfile.py
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Fixer for execfile.

This converts usages of the execfile function into calls to the built-in
exec() function.
"""

from .. import fixer_base
from ..fixer_util import (Comma, Name, Call, LParen, RParen, Dot, Node,
                          ArgList, String, syms)


class FixExecfile(fixer_base.BaseFix):
    BM_compatible = True

    PATTERN = """
    power< 'execfile' trailer< '(' arglist< filename=any [',' globals=any [',' locals=any ] ] > ')' > >
    |
    power< 'execfile' trailer< '(' filename=any ')' > >
    """

    def transform(self, node, results):
        assert results
        filename = results["filename"]
        globals = results.get("globals")
        locals = results.get("locals")

        # Copy over the prefix from the right parentheses end of the execfile
        # call.
        execfile_paren = node.children[-1].children[-1].clone()
        # Construct open().read().
        open_args = ArgList([filename.clone(), Comma(), String('"rb"', ' ')],
                            rparen=execfile_paren)
        open_call = Node(syms.power, [Name(u"open"), open_args])
        read = [Node(syms.trailer, [Dot(), Name(u'read')]),
                Node(syms.trailer, [LParen(), RParen()])]
        open_expr = [open_call] + read
        # Wrap the open call in a compile call. This is so the filename will be
        # preserved in the execed code.
        filename_arg = filename.clone()
        filename_arg.prefix = u" "
        exec_str = String(u"'exec'", u" ")
        compile_args = open_expr + [Comma(), filename_arg, Comma(), exec_str]
        compile_call = Call(Name(u"compile"), compile_args, u"")
        # Finally, replace the execfile call with an exec call.
        args = [compile_call]
        if globals is not None:
            args.extend([Comma(), globals.clone()])
        if locals is not None:
            args.extend([Comma(), locals.clone()])
        return Call(Name(u"exec"), args, prefix=node.prefix)
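Reading the transform above back into source form gives roughly this rewrite; the file name and namespace variables are invented:

# Before (Python 2):
#     execfile("setup.py", gdict, ldict)
# After; compile() keeps the real filename in tracebacks of the
# executed code:
#     exec(compile(open("setup.py", "rb").read(), "setup.py", 'exec'),
#          gdict, ldict)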
fixes/fix_exitfunc.py
"""
Convert use of sys.exitfunc to use the atexit module.
"""

# Author: Benjamin Peterson

from lib2to3 import pytree, fixer_base
from lib2to3.fixer_util import Name, Attr, Call, Comma, Newline, syms


class FixExitfunc(fixer_base.BaseFix):
    keep_line_order = True
    BM_compatible = True

    PATTERN = """
              (
                  sys_import=import_name<'import'
                      ('sys'
                      |
                      dotted_as_names< (any ',')* 'sys' (',' any)* >
                      )
                  >
              |
                  expr_stmt<
                      power< 'sys' trailer< '.' 'exitfunc' > >
                  '=' func=any >
              )
              """

    def __init__(self, *args):
        super(FixExitfunc, self).__init__(*args)

    def start_tree(self, tree, filename):
        super(FixExitfunc, self).start_tree(tree, filename)
        self.sys_import = None

    def transform(self, node, results):
        # First, find the sys import. We'll just hope it's global scope.
        if "sys_import" in results:
            if self.sys_import is None:
                self.sys_import = results["sys_import"]
            return

        func = results["func"].clone()
        func.prefix = u""
        register = pytree.Node(syms.power,
                               Attr(Name(u"atexit"), Name(u"register"))
                               )
        call = Call(register, [func], node.prefix)
        node.replace(call)

        if self.sys_import is None:
            # That's interesting.
            self.warning(node, "Can't find sys import; Please add an atexit "
                               "import at the top of your file.")
            return

        # Now add an atexit import after the sys import.
        names = self.sys_import.children[1]
        if names.type == syms.dotted_as_names:
            names.append_child(Comma())
            names.append_child(Name(u"atexit", u" "))
        else:
            containing_stmt = self.sys_import.parent
            position = containing_stmt.children.index(self.sys_import)
            stmt_container = containing_stmt.parent
            new_import = pytree.Node(syms.import_name,
                              [Name(u"import"), Name(u"atexit", u" ")]
                              )
            new = pytree.Node(syms.simple_stmt, [new_import])
            containing_stmt.insert_child(position + 1, Newline())
            containing_stmt.insert_child(position + 2, new)
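A before/after sketch for FixExitfunc (the function name is invented); the two import shapes match the two branches at the end of transform():

# Before (Python 2):
#     import sys
#     sys.exitfunc = cleanup
# After; a separate atexit import is inserted after the sys import:
#     import sys
#     import atexit
#     atexit.register(cleanup)
#
# With a dotted_as_names import the existing statement is extended:
#     import os, sys      ->  import os, sys, atexit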
fixes/fix_filter.pyc (compiled bytecode only; recoverable docstring: "Fixer that changes filter(F, X) into list(filter(F, X)). We avoid the transformation if the filter() call is directly contained in iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or for V in <>:. NOTE: This is still not correct if the original code was depending on filter(F, X) to return a string if X is a string and a tuple if X is a tuple. That would require type inference, which we don't do. Let Python 2.6 figure it out.")

fixes/fix_funcattrs.pyc (compiled bytecode only; recoverable docstring: "Fix function attribute names (f.func_x -> f.__x__).")

fixes/fix_future.pyc (compiled bytecode only; recoverable docstring: "Remove __future__ imports. from __future__ import foo is replaced with an empty line.")
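Expanding the fix_filter docstring above into a sketch; pred and xs are invented names, and the lambda case is an assumption read off the fixer's filter_lambda pattern:

# Wrapped, because the caller keeps the result:
#     ys = filter(pred, xs)         ->  ys = list(filter(pred, xs))
# Left alone in the special consuming contexts from the docstring:
#     for y in filter(pred, xs):    ->  unchanged
#     sorted(filter(pred, xs))      ->  unchanged
# A lambda predicate is folded into a list comprehension:
#     filter(lambda x: x > 0, xs)   ->  [x for x in xs if x > 0]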
fixes/fix_getcwdu.pyc (compiled bytecode only; recoverable docstring: "Fixer that changes os.getcwdu() to os.getcwd().")

fixes/fix_has_key.pyc (compiled bytecode only; recoverable docstring: "Fixer for has_key(). Calls to .has_key() methods are expressed in terms of the 'in' operator: d.has_key(k) -> k in d. CAVEATS: 1) While the primary target of this fixer is dict.has_key(), the fixer will change any has_key() method call, regardless of its class. 2) Cases like m = d.has_key; if m(k): ... will not be converted; only *calls* to has_key() are converted.")
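A sketch of the has_key rewrite described above, including the negation and parenthesization cases handled in the compiled transform; d, k, a and b are invented:

#     d.has_key(k)         ->  k in d
#     not d.has_key(k)     ->  k not in d
# Low-precedence arguments are parenthesized to stay correct:
#     d.has_key(a or b)    ->  (a or b) in d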
fixes/fix_idioms.py
"""Adjust some old Python 2 idioms to their modern counterparts.

* Change some type comparisons to isinstance() calls:
    type(x) == T -> isinstance(x, T)
    type(x) is T -> isinstance(x, T)
    type(x) != T -> not isinstance(x, T)
    type(x) is not T -> not isinstance(x, T)

* Change "while 1:" into "while True:".

* Change both

    v = list(EXPR)
    v.sort()
    foo(v)

and the more general

    v = EXPR
    v.sort()
    foo(v)

into

    v = sorted(EXPR)
    foo(v)
"""
# Author: Jacques Frechet, Collin Winter

# Local imports
from .. import fixer_base
from ..fixer_util import Call, Comma, Name, Node, BlankLine, syms

CMP = "(n='!=' | '==' | 'is' | n=comp_op< 'is' 'not' >)"
TYPE = "power< 'type' trailer< '(' x=any ')' > >"

class FixIdioms(fixer_base.BaseFix):
    explicit = True  # The user must ask for this fixer

    PATTERN = r"""
        isinstance=comparison< %s %s T=any >
        |
        isinstance=comparison< T=any %s %s >
        |
        while_stmt< 'while' while='1' ':' any+ >
        |
        sorted=any<
            any*
            simple_stmt<
              expr_stmt< id1=any '='
                         power< list='list' trailer< '(' (not arglist) any ')' > >
              >
              '\n'
            >
            sort=
            simple_stmt<
              power< id2=any
                     trailer< '.' 'sort' > trailer< '(' ')' >
              >
              '\n'
            >
            next=any*
        >
        |
        sorted=any<
            any*
            simple_stmt< expr_stmt< id1=any '=' expr=any > '\n' >
            sort=
            simple_stmt<
              power< id2=any
                     trailer< '.' 'sort' > trailer< '(' ')' >
              >
              '\n'
            >
            next=any*
        >
    """ % (TYPE, CMP, CMP, TYPE)

    def match(self, node):
        r = super(FixIdioms, self).match(node)
        # If we've matched one of the sort/sorted subpatterns above, we
        # want to reject matches where the initial assignment and the
        # subsequent .sort() call involve different identifiers.
        if r and "sorted" in r:
            if r["id1"] == r["id2"]:
                return r
            return None
        return r

    def transform(self, node, results):
        if "isinstance" in results:
            return self.transform_isinstance(node, results)
        elif "while" in results:
            return self.transform_while(node, results)
        elif "sorted" in results:
            return self.transform_sort(node, results)
        else:
            raise RuntimeError("Invalid match")

    def transform_isinstance(self, node, results):
        x = results["x"].clone()  # The thing inside of type()
        T = results["T"].clone()  # The type being compared against
        x.prefix = u""
        T.prefix = u" "
        test = Call(Name(u"isinstance"), [x, Comma(), T])
        if "n" in results:
            test.prefix = u" "
            test = Node(syms.not_test, [Name(u"not"), test])
        test.prefix = node.prefix
        return test

    def transform_while(self, node, results):
        one = results["while"]
        one.replace(Name(u"True", prefix=one.prefix))

    def transform_sort(self, node, results):
        sort_stmt = results["sort"]
        next_stmt = results["next"]
        list_call = results.get("list")
        simple_expr = results.get("expr")

        if list_call:
            list_call.replace(Name(u"sorted", prefix=list_call.prefix))
        elif simple_expr:
            new = simple_expr.clone()
            new.prefix = u""
            simple_expr.replace(Call(Name(u"sorted"), [new],
                                     prefix=simple_expr.prefix))
        else:
            raise RuntimeError("should not have reached here")
        sort_stmt.remove()

        btwn = sort_stmt.prefix
        # Keep any prefix lines between the sort_stmt and the list_call and
        # shove them right after the sorted() call.
        if u"\n" in btwn:
            if next_stmt:
                # The new prefix should be everything from the sort_stmt's
                # prefix up to the last newline, then the old prefix after a new
                # line.
                prefix_lines = (btwn.rpartition(u"\n")[0], next_stmt[0].prefix)
                next_stmt[0].prefix = u"\n".join(prefix_lines)
            else:
                assert list_call.parent
                assert list_call.next_sibling is None
                # Put a blank line after list_call and set its prefix.
                end_line = BlankLine()
                list_call.parent.append_child(end_line)
                assert list_call.next_sibling is end_line
                # The new prefix should be everything up to the first new line
                # of sort_stmt's prefix.
                end_line.prefix = btwn.rpartition(u"\n")[0]
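Putting the three idiom rewrites above side by side; T, x, EXPR and foo are invented placeholders:

# Type comparisons become isinstance() tests:
#     type(x) == T        ->  isinstance(x, T)
#     type(x) is not T    ->  not isinstance(x, T)
# The numeric loop condition is named:
#     while 1:            ->  while True:
# The assign-then-sort pair collapses into sorted():
#     v = list(EXPR)          v = sorted(EXPR)
#     v.sort()            ->  foo(v)
#     foo(v)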
fixes/fix_import.py
"""Fixer for import statements.
If spam is being imported from the local directory, this import:
    from spam import eggs
Becomes:
    from .spam import eggs

And this import:
    import spam
Becomes:
    from . import spam
"""

# Local imports
from .. import fixer_base
from os.path import dirname, join, exists, sep
from ..fixer_util import FromImport, syms, token


def traverse_imports(names):
    """
    Walks over all the names imported in a dotted_as_names node.
    """
    pending = [names]
    while pending:
        node = pending.pop()
        if node.type == token.NAME:
            yield node.value
        elif node.type == syms.dotted_name:
            yield "".join([ch.value for ch in node.children])
        elif node.type == syms.dotted_as_name:
            pending.append(node.children[0])
        elif node.type == syms.dotted_as_names:
            pending.extend(node.children[::-2])
        else:
            raise AssertionError("unknown node type")


class FixImport(fixer_base.BaseFix):
    BM_compatible = True

    PATTERN = """
    import_from< 'from' imp=any 'import' ['('] any [')'] >
    |
    import_name< 'import' imp=any >
    """

    def start_tree(self, tree, name):
        super(FixImport, self).start_tree(tree, name)
        self.skip = "absolute_import" in tree.future_features

    def transform(self, node, results):
        if self.skip:
            return
        imp = results['imp']

        if node.type == syms.import_from:
            # Some imps are top-level (eg: 'import ham')
            # some are first level (eg: 'import ham.eggs')
            # some are third level (eg: 'import ham.eggs as spam')
            # Hence, the loop
            while not hasattr(imp, 'value'):
                imp = imp.children[0]
            if self.probably_a_local_import(imp.value):
                imp.value = u"." + imp.value
                imp.changed()
        else:
            have_local = False
            have_absolute = False
            for mod_name in traverse_imports(imp):
                if self.probably_a_local_import(mod_name):
                    have_local = True
                else:
                    have_absolute = True
            if have_absolute:
                if have_local:
                    # We won't handle both sibling and absolute imports in the
                    # same statement at the moment.
                    self.warning(node, "absolute and local imports together")
                return

            new = FromImport(u".", [imp])
            new.prefix = node.prefix
            return new

    def probably_a_local_import(self, imp_name):
        if imp_name.startswith(u"."):
            # Relative imports are certainly not local imports.
            return False
        imp_name = imp_name.split(u".", 1)[0]
        base_path = dirname(self.filename)
        base_path = join(base_path, imp_name)
        # If there is no __init__.py next to the file, it's not in a package
        # so it can't be a relative import.
        if not exists(join(dirname(base_path), "__init__.py")):
            return False
        for ext in [".py", sep, ".pyc", ".so", ".sl", ".pyd"]:
            if exists(base_path + ext):
                return True
        return False
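A sketch of FixImport's effect when a sibling module exists next to the file being fixed; the module names are invented:

# With spam.py (and an __init__.py) alongside the fixed file:
#     import spam             ->  from . import spam
#     from spam import eggs   ->  from .spam import eggs
# Imports that do not resolve to local files are left untouched, and
# mixing local and absolute names in one statement only produces a warning.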
fixes/fix_imports.pyc (compiled bytecode only; recoverable docstring: "Fix incompatible imports and module references." Its readable MAPPING table renames Python 2 stdlib modules to their Python 3 locations, e.g. StringIO/cStringIO -> io, cPickle -> pickle, __builtin__ -> builtins, copy_reg -> copyreg, Queue -> queue, SocketServer -> socketserver, ConfigParser -> configparser, Tkinter -> tkinter, thread -> _thread, httplib -> http.client, urlparse -> urllib.parse, xmlrpclib -> xmlrpc.client, and the BaseHTTPServer/SimpleHTTPServer/CGIHTTPServer trio -> http.server.)
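A sketch of the module renaming this fixer performs; both the import and later attribute references are rewritten (the variable name is invented):

#     import StringIO               ->  import io
#     s = StringIO.StringIO()       ->  s = io.StringIO()
#     from cPickle import dumps     ->  from pickle import dumps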
(dotted_as_name< (%s) 'as' any > | multiple_imports=dotted_as_names< any* dotted_as_name< (%s) 'as' any > any* >) > s3power< bare_with_attr=(%s) trailer<'.' any > any* >(RERHtkeys(tmappingtkeytmod_listt bare_names((s1/usr/lib64/python2.7/lib2to3/fixes/fix_imports.pyt build_patternAs & t FixImportscBsMeZeZeZeZdZdZdZ dZ dZ dZ RS(icCsdjt|jS(NRC(RERNRJ(tself((s1/usr/lib64/python2.7/lib2to3/fixes/fix_imports.pyRN`scCs&|j|_tt|jdS(N(RNtPATTERNtsuperROtcompile_pattern(RP((s1/usr/lib64/python2.7/lib2to3/fixes/fix_imports.pyRScscsatt|j|}|r]d|krYtfdt|dDrYtS|StS(Ntbare_with_attrc3s|]}|VqdS(N((t.0tobj(tmatch(s1/usr/lib64/python2.7/lib2to3/fixes/fix_imports.pys qstparent(RRRORWtanyRtFalse(RPtnodetresults((RWs1/usr/lib64/python2.7/lib2to3/fixes/fix_imports.pyRWjs  %cCs&tt|j||i|_dS(N(RRROt start_treetreplace(RPttreetfilename((s1/usr/lib64/python2.7/lib2to3/fixes/fix_imports.pyR]vscCs|jd}|r|j}t|j|}|jt|d|jd|kri||j|sj    fixes/fix_imports2.pyc000064400000001172147204472210011030 0ustar00 {fc@sGdZddlmZidd6dd6ZdejfdYZdS( sTFix incompatible imports and module references that must be fixed after fix_imports.i(t fix_importstdbmtwhichdbtanydbmt FixImports2cBseZdZeZRS(i(t__name__t __module__t run_ordertMAPPINGtmapping(((s2/usr/lib64/python2.7/lib2to3/fixes/fix_imports2.pyR sN(t__doc__tRRt FixImportsR(((s2/usr/lib64/python2.7/lib2to3/fixes/fix_imports2.pyts  fixes/fix_input.pyc000064400000002174147204472210010413 0ustar00 {fc@shdZddlmZddlmZmZddlmZejdZdej fdYZ dS( s4Fixer that changes input(...) into eval(input(...)).i(t fixer_base(tCalltName(tpatcomps&power< 'eval' trailer< '(' any ')' > >tFixInputcBseZeZdZdZRS(sL power< 'input' args=trailer< '(' [any] ')' > > cCsMtj|jjrdS|j}d|_ttd|gd|jS(Nuuevaltprefix(tcontexttmatchtparenttcloneRRR(tselftnodetresultstnew((s//usr/lib64/python2.7/lib2to3/fixes/fix_input.pyt transforms   (t__name__t __module__tTruet BM_compatibletPATTERNR(((s//usr/lib64/python2.7/lib2to3/fixes/fix_input.pyR sN( t__doc__tRt fixer_utilRRRtcompile_patternRtBaseFixR(((s//usr/lib64/python2.7/lib2to3/fixes/fix_input.pyts fixes/fix_intern.pyc000064400000003405147204472210010551 0ustar00 {fc@s_dZddlmZddlmZddlmZmZmZdejfdYZ dS(s/Fixer for intern(). intern(s) -> sys.intern(s)i(tpytree(t fixer_base(tNametAttrt touch_importt FixInterncBs#eZeZdZdZdZRS(tpres power< 'intern' trailer< lpar='(' ( not(arglist | argument) any ','> ) rpar=')' > after=any* > c Cso|rd|d}|rd|j|jjkr/dS|j|jjkra|jdjdkradSqdn|j}|dj}|j|jkr|j}ntj |j|jg}|d}|rg|D]}|j^q}ntj |j t t dt dtj |j |dj||djgg|}|j|_tdd||S( Ntobjis**tafterusysuinterntlpartrpar(ttypetsymst star_exprtargumenttchildrentvaluetclonetarglistRtNodetpowerRRttrailertprefixRtNone( tselftnodetresultsRR t newarglistRtntnew((s0/usr/lib64/python2.7/lib2to3/fixes/fix_intern.pyt transforms*    " U (t__name__t __module__tTruet BM_compatibletordertPATTERNR(((s0/usr/lib64/python2.7/lib2to3/fixes/fix_intern.pyRs N( t__doc__tRRt fixer_utilRRRtBaseFixR(((s0/usr/lib64/python2.7/lib2to3/fixes/fix_intern.pytsfixes/fix_isinstance.pyc000064400000003474147204472210011420 0ustar00 {fc@sCdZddlmZddlmZdejfdYZdS(s,Fixer that cleans up a tuple argument to isinstance after the tokens in it were fixed. This is mainly used to remove double occurrences of tokens as a leftover of the long -> int / unicode -> str conversion. eg. 
fixes/fix_isinstance.pyc (compiled bytecode only; recoverable docstring: "Fixer that cleans up a tuple argument to isinstance after the tokens in it were fixed. This is mainly used to remove double occurrences of tokens as a leftover of the long -> int / unicode -> str conversion. e.g. isinstance(x, (int, long)) -> isinstance(x, (int, int)) -> isinstance(x, int)")

fixes/fix_itertools.pyc (compiled bytecode only; recoverable docstring: "Fixer for itertools.(imap|ifilter|izip) --> (map|filter|zip) and itertools.ifilterfalse --> itertools.filterfalse (bugs 2360-2363). Imports from itertools are fixed in fix_itertools_import.py. If itertools is imported as something else (ie: import itertools as it; it.izip(spam, eggs)) method calls will not get fixed.")
fixes/fix_itertools_imports.pyc (compiled bytecode only; recoverable docstring: "Fixer for imports of itertools.(imap|ifilter|izip|ifilterfalse)")
fixes/fix_long.py
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Fixer that turns 'long' into 'int' everywhere.
"""

# Local imports
from lib2to3 import fixer_base
from lib2to3.fixer_util import is_probably_builtin


class FixLong(fixer_base.BaseFix):
    BM_compatible = True
    PATTERN = "'long'"

    def transform(self, node, results):
        if is_probably_builtin(node):
            node.value = u"int"
            node.changed()
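A sketch of FixLong; only uses that is_probably_builtin() accepts are renamed (x and s are invented):

#     isinstance(x, long)   ->  isinstance(x, int)
#     y = long(s)           ->  y = int(s)
# A parameter, attribute, or assignment target named "long" is left alone.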
fixes/fix_map.py
# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Fixer that changes map(F, ...) into list(map(F, ...)) unless there
exists a 'from future_builtins import map' statement in the top-level
namespace.

As a special case, map(None, X) is changed into list(X).  (This is
necessary because the semantics are changed in this case -- the new
map(None, X) is equivalent to [(x,) for x in X].)

We avoid the transformation (except for the special case mentioned
above) if the map() call is directly contained in iter(<>), list(<>),
tuple(<>), sorted(<>), ...join(<>), or for V in <>:.

NOTE: This is still not correct if the original code was depending on
map(F, X, Y, ...) to go on until the longest argument is exhausted,
substituting None for missing values -- like zip(), it now stops as
soon as the shortest argument is exhausted.
"""

# Local imports
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, ListComp, in_special_context
from ..pygram import python_symbols as syms

class FixMap(fixer_base.ConditionalFix):
    BM_compatible = True

    PATTERN = """
    map_none=power<
        'map'
        trailer< '(' arglist< 'None' ',' arg=any [','] > ')' >
    >
    |
    map_lambda=power<
        'map'
        trailer<
            '('
            arglist<
                lambdef< 'lambda'
                         (fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any
                >
                ','
                it=any
            >
            ')'
        >
    >
    |
    power<
        'map' trailer< '(' [arglist=any] ')' >
    >
    """

    skip_on = 'future_builtins.map'

    def transform(self, node, results):
        if self.should_skip(node):
            return

        if node.parent.type == syms.simple_stmt:
            self.warning(node, "You should use a for loop here")
            new = node.clone()
            new.prefix = u""
            new = Call(Name(u"list"), [new])
        elif "map_lambda" in results:
            new = ListComp(results["xp"].clone(),
                           results["fp"].clone(),
                           results["it"].clone())
        else:
            if "map_none" in results:
                new = results["arg"].clone()
            else:
                if "arglist" in results:
                    args = results["arglist"]
                    if args.type == syms.arglist and \
                       args.children[0].type == token.NAME and \
                       args.children[0].value == "None":
                        self.warning(node, "cannot convert map(None, ...) "
                                     "with multiple arguments because map() "
                                     "now truncates to the shortest sequence")
                        return
                if in_special_context(node):
                    return None
                new = node.clone()
            new.prefix = u""
            new = Call(Name(u"list"), [new])
        new.prefix = node.prefix
        return new
fixes/fix_map.pyc   [compiled bytecode of fixes/fix_map.py; binary payload omitted]
fixes/fix_map.pyo   [compiled bytecode, binary payload omitted; optimized duplicate of fix_map.pyc]
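Illustrative before/after for the map fixer (not from the archive; same
RefactoringTool sketch as above, with invented input strings):

    from lib2to3.refactor import RefactoringTool
    rt = RefactoringTool(["lib2to3.fixes.fix_map"])
    print(rt.refactor_string(u"result = map(f, items)\n", "<example>"))
    # -> result = list(map(f, items))
    print(rt.refactor_string(u"incs = map(lambda x: x + 1, seq)\n", "<example>"))
    # -> incs = [x + 1 for x in seq]   (the map_lambda branch builds a list comp)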
fixes/fix_metaclass.py
"""Fixer for __metaclass__ = X -> (metaclass=X) methods.

   The various forms of classdef (inherits nothing, inherits once, inherits
   many) don't parse the same in the CST so we look at ALL classes for
   a __metaclass__ and if we find one normalize the inherits to all be
   an arglist.

   For one-liner classes ('class X: pass') there is no indent/dedent so
   we normalize those into having a suite.

   Moving the __metaclass__ into the classdef can also cause the class
   body to be empty so there is some special casing for that as well.

   This fixer also tries very hard to keep original indenting and spacing
   in all those corner cases.
"""
# Author: Jack Diederich

# Local imports
from .. import fixer_base
from ..pygram import token
from ..fixer_util import Name, syms, Node, Leaf


def has_metaclass(parent):
    """ we have to check the cls_node without changing it.
        There are two possibilities:
           1)  clsdef => suite => simple_stmt => expr_stmt => Leaf('__meta')
           2)  clsdef => simple_stmt => expr_stmt => Leaf('__meta')
    """
    for node in parent.children:
        if node.type == syms.suite:
            return has_metaclass(node)

        elif node.type == syms.simple_stmt and node.children:
            expr_node = node.children[0]
            if expr_node.type == syms.expr_stmt and expr_node.children:
                left_side = expr_node.children[0]
                if isinstance(left_side, Leaf) and \
                        left_side.value == '__metaclass__':
                    return True

    return False


def fixup_parse_tree(cls_node):
    """ one-line classes don't get a suite in the parse tree so we add
        one to normalize the tree
    """
    for node in cls_node.children:
        if node.type == syms.suite:
            # already in the preferred format, do nothing
            return

    # !%@#! oneliners have no suite node, we have to fake one up
    for i, node in enumerate(cls_node.children):
        if node.type == token.COLON:
            break
    else:
        raise ValueError("No class suite and no ':'!")

    # move everything into a suite node
    suite = Node(syms.suite, [])
    while cls_node.children[i+1:]:
        move_node = cls_node.children[i+1]
        suite.append_child(move_node.clone())
        move_node.remove()
    cls_node.append_child(suite)
    node = suite


def fixup_simple_stmt(parent, i, stmt_node):
    """ if there is a semi-colon all the parts count as part of the same
        simple_stmt.  We just want the __metaclass__ part so we move
        everything after the semi-colon into its own simple_stmt node
    """
    for semi_ind, node in enumerate(stmt_node.children):
        if node.type == token.SEMI: # *sigh*
            break
    else:
        return

    node.remove() # kill the semicolon
    new_expr = Node(syms.expr_stmt, [])
    new_stmt = Node(syms.simple_stmt, [new_expr])
    while stmt_node.children[semi_ind:]:
        move_node = stmt_node.children[semi_ind]
        new_expr.append_child(move_node.clone())
        move_node.remove()
    parent.insert_child(i, new_stmt)
    new_leaf1 = new_stmt.children[0].children[0]
    old_leaf1 = stmt_node.children[0].children[0]
    new_leaf1.prefix = old_leaf1.prefix


def remove_trailing_newline(node):
    if node.children and node.children[-1].type == token.NEWLINE:
        node.children[-1].remove()


def find_metas(cls_node):
    # find the suite node (Mmm, sweet nodes)
    for node in cls_node.children:
        if node.type == syms.suite:
            break
    else:
        raise ValueError("No class suite!")

    # look for simple_stmt[ expr_stmt[ Leaf('__metaclass__') ] ]
    for i, simple_node in list(enumerate(node.children)):
        if simple_node.type == syms.simple_stmt and simple_node.children:
            expr_node = simple_node.children[0]
            if expr_node.type == syms.expr_stmt and expr_node.children:
                # Check if the expr_node is a simple assignment.
                left_node = expr_node.children[0]
                if isinstance(left_node, Leaf) and \
                        left_node.value == u'__metaclass__':
                    # We found an assignment to __metaclass__.
                    fixup_simple_stmt(node, i, simple_node)
                    remove_trailing_newline(simple_node)
                    yield (node, i, simple_node)


def fixup_indent(suite):
    """ If an INDENT is followed by a thing with a prefix then nuke the prefix
        Otherwise we get in trouble when removing __metaclass__ at suite start
    """
    kids = suite.children[::-1]
    # find the first indent
    while kids:
        node = kids.pop()
        if node.type == token.INDENT:
            break

    # find the first Leaf
    while kids:
        node = kids.pop()
        if isinstance(node, Leaf) and node.type != token.DEDENT:
            if node.prefix:
                node.prefix = u''
            return
        else:
            kids.extend(node.children[::-1])


class FixMetaclass(fixer_base.BaseFix):
    BM_compatible = True

    PATTERN = """
    classdef<any*>
    """

    def transform(self, node, results):
        if not has_metaclass(node):
            return

        fixup_parse_tree(node)

        # find metaclasses, keep the last one
        last_metaclass = None
        for suite, i, stmt in find_metas(node):
            last_metaclass = stmt
            stmt.remove()

        text_type = node.children[0].type # always Leaf(nnn, 'class')

        # figure out what kind of classdef we have
        if len(node.children) == 7:
            # Node(classdef, ['class', 'name', '(', arglist, ')', ':', suite])
            #                 0        1       2    3        4    5    6
            if node.children[3].type == syms.arglist:
                arglist = node.children[3]
            # Node(classdef, ['class', 'name', '(', 'Parent', ')', ':', suite])
            else:
                parent = node.children[3].clone()
                arglist = Node(syms.arglist, [parent])
                node.set_child(3, arglist)
        elif len(node.children) == 6:
            # Node(classdef, ['class', 'name', '(',  ')', ':', suite])
            #                 0        1       2     3    4    5
            arglist = Node(syms.arglist, [])
            node.insert_child(3, arglist)
        elif len(node.children) == 4:
            # Node(classdef, ['class', 'name', ':', suite])
            #                 0        1       2    3
            arglist = Node(syms.arglist, [])
            node.insert_child(2, Leaf(token.RPAR, u')'))
            node.insert_child(2, arglist)
            node.insert_child(2, Leaf(token.LPAR, u'('))
        else:
            raise ValueError("Unexpected class definition")

        # now stick the metaclass in the arglist
        meta_txt = last_metaclass.children[0].children[0]
        meta_txt.value = 'metaclass'
        orig_meta_prefix = meta_txt.prefix

        if arglist.children:
            arglist.append_child(Leaf(token.COMMA, u','))
            meta_txt.prefix = u' '
        else:
            meta_txt.prefix = u''

        # compact the expression "metaclass = Meta" -> "metaclass=Meta"
        expr_stmt = last_metaclass.children[0]
        assert expr_stmt.type == syms.expr_stmt
        expr_stmt.children[1].prefix = u''
        expr_stmt.children[2].prefix = u''

        arglist.append_child(last_metaclass)

        fixup_indent(suite)

        # check for empty suite
        if not suite.children:
            # one-liner that was just __metaclass__
            suite.remove()
            pass_leaf = Leaf(text_type, u'pass')
            pass_leaf.prefix = orig_meta_prefix
            node.append_child(pass_leaf)
            node.append_child(Leaf(token.NEWLINE, u'\n'))

        elif len(suite.children) > 1 and \
                 (suite.children[-2].type == token.INDENT and
                  suite.children[-1].type == token.DEDENT):
            # there was only one line in the class body and it was __metaclass__
            pass_leaf = Leaf(text_type, u'pass')
            suite.insert_child(-1, pass_leaf)
            suite.insert_child(-1, Leaf(token.NEWLINE, u'\n'))
fixes/fix_metaclass.pyc   [compiled bytecode of fixes/fix_metaclass.py; binary payload omitted]
fixes/fix_metaclass.pyo   [compiled bytecode, binary payload omitted; optimized duplicate of fix_metaclass.pyc]
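Illustrative before/after for the metaclass fixer (not from the archive; Base, Meta
and C are hypothetical names):

    from lib2to3.refactor import RefactoringTool
    rt = RefactoringTool(["lib2to3.fixes.fix_metaclass"])
    src = u"class C(Base):\n    __metaclass__ = Meta\n    x = 1\n"
    print(rt.refactor_string(src, "<example>"))
    # -> class C(Base, metaclass=Meta):
    #        x = 1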
fixes/fix_methodattrs.py
"""Fix bound method attributes (method.im_? -> method.__?__).
"""
# Author: Christian Heimes

# Local imports
from .. import fixer_base
from ..fixer_util import Name

MAP = {
    "im_func" : "__func__",
    "im_self" : "__self__",
    "im_class" : "__self__.__class__"
    }

class FixMethodattrs(fixer_base.BaseFix):
    BM_compatible = True
    PATTERN = """
    power< any+ trailer< '.' attr=('im_func' | 'im_self' | 'im_class') > any* >
    """

    def transform(self, node, results):
        attr = results["attr"][0]
        new = unicode(MAP[attr.value])
        attr.replace(Name(new, prefix=attr.prefix))
fixes/fix_methodattrs.pyc   [compiled bytecode of fixes/fix_methodattrs.py; binary payload omitted]
fixes/fix_methodattrs.pyo   [compiled bytecode, binary payload omitted; optimized duplicate of fix_methodattrs.pyc]
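Illustrative usage (not from the archive; invented names):

    from lib2to3.refactor import RefactoringTool
    rt = RefactoringTool(["lib2to3.fixes.fix_methodattrs"])
    print(rt.refactor_string(u"f = m.im_func\n", "<example>"))
    # -> f = m.__func__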
fixes/fix_ne.pyc   [compiled bytecode, binary payload omitted; recovered docstring: "Fixer that turns <> into !=."]
fixes/fix_ne.pyo   [compiled bytecode, binary payload omitted; optimized duplicate of fix_ne.pyc]
fixes/fix_next.pyc   [compiled bytecode, binary payload omitted; recovered docstring: "Fixer for it.next() -> next(it), per PEP 3114."; also warns "Calls to builtin next() possibly shadowed by global binding"]
fixes/fix_next.pyo   [compiled bytecode, binary payload omitted; optimized duplicate of fix_next.pyc]
fixes/fix_nonzero.py
"""Fixer for __nonzero__ -> __bool__ methods."""
# Author: Collin Winter

# Local imports
from .. import fixer_base
from ..fixer_util import Name, syms

class FixNonzero(fixer_base.BaseFix):
    BM_compatible = True
    PATTERN = """
    classdef< 'class' any+ ':'
              suite< any*
                     funcdef< 'def' name='__nonzero__'
                              parameters< '(' NAME ')' > any+ >
                     any* > >
    """

    def transform(self, node, results):
        name = results["name"]
        new = Name(u"__bool__", prefix=name.prefix)
        name.replace(new)
fixes/fix_nonzero.pyc   [compiled bytecode of fixes/fix_nonzero.py; binary payload omitted]
fixes/fix_nonzero.pyo   [compiled bytecode, binary payload omitted; optimized duplicate of fix_nonzero.pyc]
fixes/fix_numliterals.py
"""Fixer that turns 1L into 1, 0755 into 0o755.
"""
# Copyright 2007 Georg Brandl.
# Licensed to PSF under a Contributor Agreement.

# Local imports
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Number


class FixNumliterals(fixer_base.BaseFix):
    # This is so simple that we don't need the pattern compiler.

    _accept_type = token.NUMBER

    def match(self, node):
        # Override
        return (node.value.startswith(u"0") or node.value[-1] in u"Ll")

    def transform(self, node, results):
        val = node.value
        if val[-1] in u'Ll':
            val = val[:-1]
        elif val.startswith(u'0') and val.isdigit() and len(set(val)) > 1:
            val = u"0o" + val[1:]

        return Number(val, prefix=node.prefix)
fixes/fix_numliterals.pyc   [compiled bytecode of fixes/fix_numliterals.py; binary payload omitted]
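Illustrative usage of the numeric-literal fixer (not from the archive; invented input):

    from lib2to3.refactor import RefactoringTool
    rt = RefactoringTool(["lib2to3.fixes.fix_numliterals"])
    print(rt.refactor_string(u"mode = 0755\n", "<example>"))   # -> mode = 0o755
    print(rt.refactor_string(u"big = 10L\n", "<example>"))     # -> big = 10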
fixes/fix_operator.py
"""Fixer for operator functions.

operator.isCallable(obj)       -> hasattr(obj, '__call__')
operator.sequenceIncludes(obj) -> operator.contains(obj)
operator.isSequenceType(obj)   -> isinstance(obj, collections.Sequence)
operator.isMappingType(obj)    -> isinstance(obj, collections.Mapping)
operator.isNumberType(obj)     -> isinstance(obj, numbers.Number)
operator.repeat(obj, n)        -> operator.mul(obj, n)
operator.irepeat(obj, n)       -> operator.imul(obj, n)
"""

# Local imports
from lib2to3 import fixer_base
from lib2to3.fixer_util import Call, Name, String, touch_import


def invocation(s):
    def dec(f):
        f.invocation = s
        return f
    return dec


class FixOperator(fixer_base.BaseFix):
    BM_compatible = True
    order = "pre"

    methods = """
              method=('isCallable'|'sequenceIncludes'
                     |'isSequenceType'|'isMappingType'|'isNumberType'
                     |'repeat'|'irepeat')
              """
    obj = "'(' obj=any ')'"
    PATTERN = """
              power< module='operator'
                trailer< '.' %(methods)s > trailer< %(obj)s > >
              |
              power< %(methods)s trailer< %(obj)s > >
              """ % dict(methods=methods, obj=obj)

    def transform(self, node, results):
        method = self._check_method(node, results)
        if method is not None:
            return method(node, results)

    @invocation("operator.contains(%s)")
    def _sequenceIncludes(self, node, results):
        return self._handle_rename(node, results, u"contains")

    @invocation("hasattr(%s, '__call__')")
    def _isCallable(self, node, results):
        obj = results["obj"]
        args = [obj.clone(), String(u", "), String(u"'__call__'")]
        return Call(Name(u"hasattr"), args, prefix=node.prefix)

    @invocation("operator.mul(%s)")
    def _repeat(self, node, results):
        return self._handle_rename(node, results, u"mul")

    @invocation("operator.imul(%s)")
    def _irepeat(self, node, results):
        return self._handle_rename(node, results, u"imul")

    @invocation("isinstance(%s, collections.Sequence)")
    def _isSequenceType(self, node, results):
        return self._handle_type2abc(node, results, u"collections", u"Sequence")

    @invocation("isinstance(%s, collections.Mapping)")
    def _isMappingType(self, node, results):
        return self._handle_type2abc(node, results, u"collections", u"Mapping")

    @invocation("isinstance(%s, numbers.Number)")
    def _isNumberType(self, node, results):
        return self._handle_type2abc(node, results, u"numbers", u"Number")

    def _handle_rename(self, node, results, name):
        method = results["method"][0]
        method.value = name
        method.changed()

    def _handle_type2abc(self, node, results, module, abc):
        touch_import(None, module, node)
        obj = results["obj"]
        args = [obj.clone(), String(u", " + u".".join([module, abc]))]
        return Call(Name(u"isinstance"), args, prefix=node.prefix)

    def _check_method(self, node, results):
        method = getattr(self, "_" + results["method"][0].value.encode("ascii"))
        if callable(method):
            if "module" in results:
                return method
            else:
                sub = (unicode(results["obj"]),)
                invocation_str = unicode(method.invocation) % sub
                self.warning(node, u"You should use '%s' here." % invocation_str)
        return None
fixes/fix_operator.pyc   [compiled bytecode of fixes/fix_operator.py; binary payload omitted]
fixes/fix_operator.pyo   [compiled bytecode, binary payload omitted; optimized duplicate of fix_operator.pyc]
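Illustrative usage (not from the archive; invented input):

    from lib2to3.refactor import RefactoringTool
    rt = RefactoringTool(["lib2to3.fixes.fix_operator"])
    print(rt.refactor_string(u"operator.isCallable(f)\n", "<example>"))
    # -> hasattr(f, '__call__')
    # A bare isCallable(f) call only triggers the warning path, since the
    # fixer cannot be sure it refers to the operator module.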
fixes/fix_paren.pyc   [compiled bytecode, binary payload omitted; recovered docstring (typo corrected): "Fixer that adds parentheses where they are required. This converts ``[x for x in 1, 2]`` to ``[x for x in (1, 2)]``."]
fixes/fix_paren.pyo   [compiled bytecode, binary payload omitted; optimized duplicate of fix_paren.pyc]
fixes/fix_print.pyc   [compiled bytecode, binary payload omitted; recovered docstring: "Fixer for print. Change: 'print' into 'print()'; 'print ...' into 'print(...)'; 'print ... ,' into 'print(..., end=\" \")'; 'print >>x, ...' into 'print(..., file=x)'. No changes are applied if print_function is imported from __future__."]
fixes/fix_print.pyo   [compiled bytecode, binary payload omitted; optimized duplicate of fix_print.pyc]
fixes/fix_raise.py
"""Fixer for 'raise E, V, T'

raise         -> raise
raise E       -> raise E
raise E, V    -> raise E(V)
raise E, V, T -> raise E(V).with_traceback(T)
raise E, None, T -> raise E.with_traceback(T)

raise (((E, E'), E''), E'''), V -> raise E(V)
raise "foo", V, T               -> warns about string exceptions


CAVEATS:
1) "raise E, V" will be incorrectly translated if V is an exception
   instance. The correct Python 3 idiom is

        raise E from V

   but since we can't detect instance-hood by syntax alone and since
   any client code would have to be changed as well, we don't automate
   this.
"""
# Author: Collin Winter

# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, Attr, ArgList, is_tuple

class FixRaise(fixer_base.BaseFix):

    BM_compatible = True
    PATTERN = """
    raise_stmt< 'raise' exc=any [',' val=any [',' tb=any]] >
    """

    def transform(self, node, results):
        syms = self.syms

        exc = results["exc"].clone()
        if exc.type == token.STRING:
            msg = "Python 3 does not support string exceptions"
            self.cannot_convert(node, msg)
            return

        # Python 2 supports
        #  raise ((((E1, E2), E3), E4), E5), V
        # as a synonym for
        #  raise E1, V
        # Since Python 3 will not support this, we recurse down any tuple
        # literals, always taking the first element.
        if is_tuple(exc):
            while is_tuple(exc):
                # exc.children[1:-1] is the unparenthesized tuple
                # exc.children[1].children[0] is the first element of the tuple
                exc = exc.children[1].children[0].clone()
            exc.prefix = u" "

        if "val" not in results:
            # One-argument raise
            new = pytree.Node(syms.raise_stmt, [Name(u"raise"), exc])
            new.prefix = node.prefix
            return new

        val = results["val"].clone()
        if is_tuple(val):
            args = [c.clone() for c in val.children[1:-1]]
        else:
            val.prefix = u""
            args = [val]

        if "tb" in results:
            tb = results["tb"].clone()
            tb.prefix = u""

            e = exc
            # If there's a traceback and None is passed as the value, then don't
            # add a call, since the user probably just wants to add a
            # traceback. See issue #9661.
            if val.type != token.NAME or val.value != u"None":
                e = Call(exc, args)
            with_tb = Attr(e, Name(u'with_traceback')) + [ArgList([tb])]
            new = pytree.Node(syms.simple_stmt, [Name(u"raise")] + with_tb)
            new.prefix = node.prefix
            return new
        else:
            return pytree.Node(syms.raise_stmt,
                               [Name(u"raise"), Call(exc, args)],
                               prefix=node.prefix)
fixes/fix_raise.pyc   [compiled bytecode of fixes/fix_raise.py; binary payload omitted]
fixes/fix_raise.pyo   [compiled bytecode, binary payload omitted; optimized duplicate of fix_raise.pyc]
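Illustrative before/after for the raise fixer (not from the archive; E, V and T
stand in for arbitrary expressions):

    from lib2to3.refactor import RefactoringTool
    rt = RefactoringTool(["lib2to3.fixes.fix_raise"])
    print(rt.refactor_string(u"raise ValueError, msg\n", "<example>"))
    # -> raise ValueError(msg)
    print(rt.refactor_string(u"raise E, V, T\n", "<example>"))
    # -> raise E(V).with_traceback(T)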
fixes/fix_raw_input.py
"""Fixer that changes raw_input(...) into input(...)."""
# Author: Andre Roberge

# Local imports
from .. import fixer_base
from ..fixer_util import Name

class FixRawInput(fixer_base.BaseFix):

    BM_compatible = True
    PATTERN = """
              power< name='raw_input' trailer< '(' [any] ')' > any* >
              """

    def transform(self, node, results):
        name = results["name"]
        name.replace(Name(u"input", prefix=name.prefix))
fixes/fix_raw_input.pyc   [compiled bytecode of fixes/fix_raw_input.py; binary payload omitted]
fixes/fix_reduce.py
# Copyright 2008 Armin Ronacher.
# Licensed to PSF under a Contributor Agreement.

"""Fixer for reduce().

Makes sure reduce() is imported from the functools module if reduce is
used in that module.
"""

from lib2to3 import fixer_base
from lib2to3.fixer_util import touch_import


class FixReduce(fixer_base.BaseFix):

    BM_compatible = True
    order = "pre"

    PATTERN = """
    power< 'reduce'
        trailer< '('
            arglist< (
                (not(argument<any '=' any>) any ','
                 not(argument<any '=' any>) any) |
                (not(argument<any '=' any>) any ','
                 not(argument<any '=' any>) any ','
                 not(argument<any '=' any>) any)
            ) >
        ')' >
    >
    """

    def transform(self, node, results):
        touch_import(u'functools', u'reduce', node)
fixes/fix_reduce.pyc   [compiled bytecode of fixes/fix_reduce.py; binary payload omitted]
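Illustrative usage of the reduce fixer (not from the archive; invented input):

    from lib2to3.refactor import RefactoringTool
    rt = RefactoringTool(["lib2to3.fixes.fix_reduce"])
    print(rt.refactor_string(u"total = reduce(add, values)\n", "<example>"))
    # -> from functools import reduce
    #    total = reduce(add, values)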
fixes/fix_renames.pyc   [compiled bytecode, binary payload omitted; recovered docstring: "Fix incompatible renames. Fixes: * sys.maxint -> sys.maxsize"]
fixes/fix_repr.pyc   [compiled bytecode, binary payload omitted; recovered docstring: "Fixer that transforms `xyzzy` into repr(xyzzy)."]
fixes/fix_repr.pyo   [compiled bytecode, binary payload omitted; optimized duplicate of fix_repr.pyc]
fixes/fix_set_literal.pyc   [compiled bytecode, binary payload omitted; recovered docstring: "Optional fixer to transform set() calls to set literals."]
fixes/fix_standarderror.pyc   [compiled bytecode, binary payload omitted; recovered docstring: "Fixer for StandardError -> Exception."]
fixes/fix_sys_exc.pyc   [compiled bytecode, binary payload omitted; recovered docstring: "Fixer for sys.exc_{type, value, traceback}: sys.exc_type -> sys.exc_info()[0]; sys.exc_value -> sys.exc_info()[1]; sys.exc_traceback -> sys.exc_info()[2]"]
fixes/fix_sys_exc.pyo   [compiled bytecode, binary payload omitted; optimized duplicate of fix_sys_exc.pyc]
fixes/fix_throw.pyc   [compiled bytecode, binary payload omitted; recovered docstring: "Fixer for generator.throw(E, V, T): g.throw(E) -> g.throw(E); g.throw(E, V) -> g.throw(E(V)); g.throw(E, V, T) -> g.throw(E(V).with_traceback(T)); g.throw(\"foo\"[, V[, T]]) will warn about string exceptions."]
fixes/fix_tuple_params.pyc   [compiled bytecode, binary payload omitted; recovered docstring: "Fixer for function definitions with tuple parameters: def func(((a, b), c), d): ... -> def func(x, d): ((a, b), c) = x ...; also supports lambdas: lambda (x, y): x + y -> lambda t: t[0] + t[1] (the parens are a syntax error in Python 3), while lambda (x): x + y -> lambda x: x + y."]
It will also support lambdas: lambda (x, y): x + y -> lambda t: t[0] + t[1] # The parens are a syntax error in Python 3 lambda (x): x + y -> lambda x: x + y i(tpytree(ttoken(t fixer_base(tAssigntNametNewlinetNumbert SubscripttsymscCs)t|tjo(|jdjtjkS(Ni(t isinstanceRtNodetchildrenttypeRtSTRING(tstmt((s6/usr/lib64/python2.7/lib2to3/fixes/fix_tuple_params.pyt is_docstringstFixTupleParamscBs,eZdZeZdZdZdZRS(is funcdef< 'def' any parameters< '(' args=any ')' > ['->' any] ':' suite=any+ > | lambda= lambdef< 'lambda' args=vfpdef< '(' inner=any ')' > ':' body=any > c s0d|krj||Sg|d}|d}|djdjtjkryd}|djdj}tn!d}d}tjtjdt fd }|jt j kr||n`|jt j kr1xKt |jD]7\}} | jt j kr|| d |dkqqWns;dSxD]} |d| _qBW|} |dkr{d d_n1t|dj|r|d_|d} nxD]} |d| _qW|dj| | +x=t| d| tdD]}||dj|_qW|djdS( Ntlambdatsuitetargsiiiu; ucstj}|j}d|_t||j}|rNd|_n|j|jtjt j |jgdS(Nuu ( Rtnew_nametclonetprefixRtreplacetappendRR Rt simple_stmt(t tuple_argt add_prefixtntargR(tendt new_linestself(s6/usr/lib64/python2.7/lib2to3/fixes/fix_tuple_params.pyt handle_tupleCs    Ru (ttransform_lambdaR R RtINDENTtvalueRRtLeaftFalseRttfpdeft typedargslistt enumeratetparentRRtrangetlentchanged( R tnodetresultsRRtstarttindentR!tiRtlinetafter((RRR s6/usr/lib64/python2.7/lib2to3/fixes/fix_tuple_params.pyt transform.sF            (cCsN|d}|d}t|d}|jtjkr\|j}d|_|j|dSt|}t|}|j t |}t |dd} |j| jx|j D]} | jtjkr| j |krg|| j D]} | j^q} tjtj| jg| } | j| _| j| qqWdS(NRtbodytinneru R(t simplify_argsR RtNAMERRRt find_paramst map_to_indexRt tuple_nameRt post_orderR$RR Rtpower(R R.R/RR6R7tparamstto_indexttup_namet new_paramRtct subscriptstnew((s6/usr/lib64/python2.7/lib2to3/fixes/fix_tuple_params.pyR"ns(       !&  (t__name__t __module__t run_ordertTruet BM_compatibletPATTERNR5R"(((s6/usr/lib64/python2.7/lib2to3/fixes/fix_tuple_params.pyRs   @cCso|jtjtjfkr|S|jtjkr[x#|jtjkrV|jd}q4W|Std|dS(NisReceived unexpected node %s(R RtvfplistRR9tvfpdefR t RuntimeError(R.((s6/usr/lib64/python2.7/lib2to3/fixes/fix_tuple_params.pyR8scCsn|jtjkr#t|jdS|jtjkr<|jSg|jD]$}|jtjkrFt|^qFS(Ni( R RRMR:R RR9R$tCOMMA(R.RC((s6/usr/lib64/python2.7/lib2to3/fixes/fix_tuple_params.pyR:s cCs|dkri}nxht|D]Z\}}ttt|g}t|trnt||d|q"||||s. l  fixes/fix_types.pyc000064400000004242147204472210010416 0ustar00 {fc@s dZddlmZddlmZddlmZidd6dd6d d 6d d 6d d6d d6dd6dd6dd6dd6dd6dd6dd6dd6dd 6d!d"6d#d$6d%d&6d d'6d(d)6d*d+6ZgeD]Zd,e^qZ d-ej fd.YZ d/S(0sFixer for removing uses of the types module. These work for only the known names in the types module. The forms above can include types. or not. ie, It is assumed the module is imported either as: import types from types import ... # either * or specific types The import statements are not modified. There should be another fixer that handles at least the following constants: type([]) -> list type(()) -> tuple type('') -> str i(ttoken(t fixer_base(tNametboolt BooleanTypet memoryviewt BufferTypettypet ClassTypetcomplext ComplexTypetdicttDictTypetDictionaryTypestype(Ellipsis)t EllipsisTypetfloatt FloatTypetinttIntTypetlisttListTypetLongTypetobjectt ObjectTypes type(None)tNoneTypestype(NotImplemented)tNotImplementedTypetslicet SliceTypetbytest StringTypes(str,)t StringTypesttuplet TupleTypetTypeTypetstrt UnicodeTypetranget XRangeTypes)power< 'types' trailer< '.' 
name='%s' > >tFixTypescBs&eZeZdjeZdZRS(t|cCs9ttj|dj}|r5t|d|jSdS(Ntnametprefix(tunicodet _TYPE_MAPPINGtgettvalueRR)tNone(tselftnodetresultst new_value((s//usr/lib64/python2.7/lib2to3/fixes/fix_types.pyt transform:s(t__name__t __module__tTruet BM_compatibletjoint_patstPATTERNR3(((s//usr/lib64/python2.7/lib2to3/fixes/fix_types.pyR&6sN( t__doc__tpgen2RtRt fixer_utilRR+ttR9tBaseFixR&(((s//usr/lib64/python2.7/lib2to3/fixes/fix_types.pyts6 fixes/fix_unicode.pyc000064400000003304147204472210010676 0ustar00 {fc@sWdZddlmZddlmZidd6dd6Zdejfd YZd S( sFixer for unicode. * Changes unicode to str and unichr to chr. * If "...\u..." is not unicode literal change it into "...\\u...". * Change u"..." into "...". i(ttoken(t fixer_baseuchruunichrustruunicodet FixUnicodecBs&eZeZdZdZdZRS(sSTRING | 'unicode' | 'unichr'cCs/tt|j||d|jk|_dS(Ntunicode_literals(tsuperRt start_treetfuture_featuresR(tselfttreetfilename((s1/usr/lib64/python2.7/lib2to3/fixes/fix_unicode.pyRscCs|jtjkr2|j}t|j|_|S|jtjkr|j}|j r|ddkrd|krdjg|j dD]$}|j ddj dd^q}n|dd kr|d }n||jkr|S|j}||_|SdS( Niu'"u\u\\u\uu\\uu\Uu\\UuuUi( ttypeRtNAMEtclonet_mappingtvaluetSTRINGRtjointsplittreplace(Rtnodetresultstnewtvaltv((s1/usr/lib64/python2.7/lib2to3/fixes/fix_unicode.pyt transforms"  &=   (t__name__t __module__tTruet BM_compatibletPATTERNRR(((s1/usr/lib64/python2.7/lib2to3/fixes/fix_unicode.pyRs N(t__doc__tpgen2RtRR tBaseFixR(((s1/usr/lib64/python2.7/lib2to3/fixes/fix_unicode.pyt sfixes/fix_unicode.pyo000064400000003304147204472210010712 0ustar00 {fc@sWdZddlmZddlmZidd6dd6Zdejfd YZd S( sFixer for unicode. * Changes unicode to str and unichr to chr. * If "...\u..." is not unicode literal change it into "...\\u...". * Change u"..." into "...". i(ttoken(t fixer_baseuchruunichrustruunicodet FixUnicodecBs&eZeZdZdZdZRS(sSTRING | 'unicode' | 'unichr'cCs/tt|j||d|jk|_dS(Ntunicode_literals(tsuperRt start_treetfuture_featuresR(tselfttreetfilename((s1/usr/lib64/python2.7/lib2to3/fixes/fix_unicode.pyRscCs|jtjkr2|j}t|j|_|S|jtjkr|j}|j r|ddkrd|krdjg|j dD]$}|j ddj dd^q}n|dd kr|d }n||jkr|S|j}||_|SdS( Niu'"u\u\\u\uu\\uu\Uu\\UuuUi( ttypeRtNAMEtclonet_mappingtvaluetSTRINGRtjointsplittreplace(Rtnodetresultstnewtvaltv((s1/usr/lib64/python2.7/lib2to3/fixes/fix_unicode.pyt transforms"  &=   (t__name__t __module__tTruet BM_compatibletPATTERNRR(((s1/usr/lib64/python2.7/lib2to3/fixes/fix_unicode.pyRs N(t__doc__tpgen2RtRR tBaseFixR(((s1/usr/lib64/python2.7/lib2to3/fixes/fix_unicode.pyt sfixes/fix_urllib.pyc000064400000015770147204472210010553 0ustar00 {fc@ssdZddlmZmZddlmZddlmZmZm Z m Z m Z m Z m Z iddddd d d d d gfddddddddddddddddgfddgfgd 6dd d!d"d#d$d%d&d'd(d)d*d+d,d-d.d/d0d1d2d3d4d5d6d7gfdd8d9gfgd:6Zed:jed d;d<Zd=efd>YZd?S(@sFix changes imports of urllib which are now incompatible. This is rather similar to fix_imports, but because of the more complex nature of the fixing for urllib, it has its own fixer. 
i(t alternatest FixImports(t fixer_base(tNametCommat FromImporttNewlinetfind_indentationtNodetsymssurllib.requestt URLopenertFancyURLopenert urlretrievet _urlopenerturlopent urlcleanupt pathname2urlt url2pathnames urllib.parsetquotet quote_plustunquotet unquote_plust urlencodet splitattrt splithostt splitnportt splitpasswdt splitportt splitquerytsplittagt splittypet splitusert splitvalues urllib.errortContentTooShortErrorturllibtinstall_openert build_openertRequesttOpenerDirectort BaseHandlertHTTPDefaultErrorHandlertHTTPRedirectHandlertHTTPCookieProcessort ProxyHandlertHTTPPasswordMgrtHTTPPasswordMgrWithDefaultRealmtAbstractBasicAuthHandlertHTTPBasicAuthHandlertProxyBasicAuthHandlertAbstractDigestAuthHandlertHTTPDigestAuthHandlertProxyDigestAuthHandlert HTTPHandlert HTTPSHandlert FileHandlert FTPHandlertCacheFTPHandlertUnknownHandlertURLErrort HTTPErrorturllib2iccst}xtjD]w\}}xh|D]`}|\}}t|}d||fVd|||fVd|Vd|Vd||fVq)WqWdS(Nsimport_name< 'import' (module=%r | dotted_as_names< any* module=%r any* >) > simport_from< 'from' mod_member=%r 'import' ( member=%s | import_as_name< member=%s 'as' any > | import_as_names< members=any* >) > sIimport_from< 'from' module_star=%r 'import' star='*' > stimport_name< 'import' dotted_as_name< module_as=%r 'as' any > > sKpower< bare_with_attr=%r trailer< '.' member=%s > any* > (tsettMAPPINGtitemsR(tbaret old_moduletchangestchanget new_moduletmembers((s0/usr/lib64/python2.7/lib2to3/fixes/fix_urllib.pyt build_pattern1s      t FixUrllibcBs5eZdZdZdZdZdZRS(cCsdjtS(Nt|(tjoinRF(tself((s0/usr/lib64/python2.7/lib2to3/fixes/fix_urllib.pyRFJscCs|jd}|j}g}x?t|jd D],}|jt|dd|tgq0W|jtt|jddd||j|dS(sTransform for the basic import case. Replaces the old import name with a comma separated list of its replacements. tmoduleiitprefixN( tgetRLR>tvaluetextendRRtappendtreplace(RJtnodetresultst import_modtpreftnamestname((s0/usr/lib64/python2.7/lib2to3/fixes/fix_urllib.pyttransform_importMs *(cCs|jd}|j}|jd}|rt|trI|d}nd }x6t|jD]'}|j|dkr]|d}Pq]q]W|r|jt|d|q|j |dn/g}i} |d} x| D]}|j t j kr|j dj} |j dj} n|j} d } | d krxlt|jD]Z}| |dkr>|d| krx|j|dn| j|dgj|q>q>WqqWg} t|}t}d }x|D]}| |}g}x8|d D],}|j||||jtqW|j||d |t||}| sa|jjj|rm||_n| j|t}qW| rg}x(| d D]}|j|tgqW|j| d |j|n|j |d d S(sTransform for imports of specific module elements. Replaces the module to be imported from with the appropriate new module. 
t mod_membertmemberiiRLs!This is an invalid module elementREiu,cSsz|jtjkrdt|jdjd||jdj|jdjg}ttj|gSt|jd|gS(NiRLii(ttypeR timport_as_nameRtchildrenRNtcloneR(RWRLtkids((s0/usr/lib64/python2.7/lib2to3/fixes/fix_urllib.pyt handle_names isAll module elements are invalidN(RMRLt isinstancetlisttNoneR>RNRQRtcannot_convertR[R R\R]RPt setdefaultRtTrueRORRtparenttendswithtFalseR(RJRRRSRYRURZtnew_nameRCtmodulestmod_dictREtas_namet member_namet new_nodest indentationtfirstR`RKteltsRVtelttnewtnodestnew_node((s0/usr/lib64/python2.7/lib2to3/fixes/fix_urllib.pyttransform_member]sh       +       cCs|jd}|jd}d}t|tr@|d}nx6t|jD]'}|j|dkrN|d}PqNqNW|r|jt|d|jn|j |ddS(s.Transform for calls to module members in code.tbare_with_attrRZiiRLs!This is an invalid module elementN( RMRcRaRbR>RNRQRRLRd(RJRRRSt module_dotRZRjRC((s0/usr/lib64/python2.7/lib2to3/fixes/fix_urllib.pyt transform_dots  cCs|jdr"|j||n|jdrD|j||nf|jdrf|j||nD|jdr|j|dn"|jdr|j|dndS(NRKRYRxt module_starsCannot handle star imports.t module_ass#This module is now multiple modules(RMRXRwRzRd(RJRRRS((s0/usr/lib64/python2.7/lib2to3/fixes/fix_urllib.pyt transforms(t__name__t __module__RFRXRwRzR}(((s0/usr/lib64/python2.7/lib2to3/fixes/fix_urllib.pyRGHs    L N(t__doc__tlib2to3.fixes.fix_importsRRtlib2to3Rtlib2to3.fixer_utilRRRRRRR R>RPRFRG(((s0/usr/lib64/python2.7/lib2to3/fixes/fix_urllib.pytsD4           fixes/fix_ws_comma.pyc000064400000002564147204472210011064 0ustar00 {fc@sSdZddlmZddlmZddlmZdejfdYZdS(sFixer that changes 'a ,b' into 'a, b'. This also changes '{a :b}' into '{a: b}', but does not touch other uses of colons. It does not touch other uses of whitespace. i(tpytree(ttoken(t fixer_baset FixWsCommacBsSeZeZdZejejdZejej dZ ee fZ dZ RS(sH any<(not(',') any)+ ',' ((not(',') any)+ ',')* [not(',') any]> u,u:cCs|j}t}x|jD]u}||jkrg|j}|jr^d|kr^d|_nt}q|r|j}|sd|_qnt}qW|S(Nu uu (tclonetFalsetchildrentSEPStprefixtisspacetTrue(tselftnodetresultstnewtcommatchildR((s2/usr/lib64/python2.7/lib2to3/fixes/fix_ws_comma.pyt transforms      ( t__name__t __module__R texplicittPATTERNRtLeafRtCOMMAtCOLONRR(((s2/usr/lib64/python2.7/lib2to3/fixes/fix_ws_comma.pyR s  N(t__doc__tRtpgen2RRtBaseFixR(((s2/usr/lib64/python2.7/lib2to3/fixes/fix_ws_comma.pytsfixes/fix_xrange.pyc000064400000006017147204472210010540 0ustar00 {fc@s_dZddlmZddlmZmZmZddlmZdejfdYZ dS(s/Fixer that changes xrange(...) 
into range(...).i(t fixer_base(tNametCalltconsuming_calls(tpatcompt FixXrangecBsteZeZdZdZdZdZdZdZ dZ e j e Z dZe j eZdZRS( s power< (name='range'|name='xrange') trailer< '(' args=any ')' > rest=any* > cCs)tt|j||t|_dS(N(tsuperRt start_treetsetttransformed_xranges(tselfttreetfilename((s0/usr/lib64/python2.7/lib2to3/fixes/fix_xrange.pyRscCs d|_dS(N(tNoneR (R R R ((s0/usr/lib64/python2.7/lib2to3/fixes/fix_xrange.pyt finish_treescCs^|d}|jdkr)|j||S|jdkrH|j||Stt|dS(Ntnameuxrangeurange(tvaluettransform_xrangettransform_ranget ValueErrortrepr(R tnodetresultsR((s0/usr/lib64/python2.7/lib2to3/fixes/fix_xrange.pyt transforms  cCs@|d}|jtdd|j|jjt|dS(NRurangetprefix(treplaceRRR taddtid(R RRR((s0/usr/lib64/python2.7/lib2to3/fixes/fix_xrange.pyR$s cCst||jkr|j| rttd|djg}ttd|gd|j}x|dD]}|j|qsW|SdS(NurangetargsulistRtrest(RR tin_special_contextRRtcloneRt append_child(R RRt range_callt list_calltn((s0/usr/lib64/python2.7/lib2to3/fixes/fix_xrange.pyR*s" s3power< func=NAME trailer< '(' node=any ')' > any* >sfor_stmt< 'for' any 'in' node=any ':' any* > | comp_for< 'for' any 'in' node=any any* > | comparison< any 'in' node=any any*> cCs|jdkrtSi}|jjdk rg|jj|jj|rg|d|krg|djtkS|jj|j|o|d|kS(NRtfunc(tparentR tFalsetp1tmatchRRtp2(R RR((s0/usr/lib64/python2.7/lib2to3/fixes/fix_xrange.pyR?s(t__name__t __module__tTruet BM_compatibletPATTERNRRRRRtP1Rtcompile_patternR'tP2R)R(((s0/usr/lib64/python2.7/lib2to3/fixes/fix_xrange.pyR s    N( t__doc__tRt fixer_utilRRRRtBaseFixR(((s0/usr/lib64/python2.7/lib2to3/fixes/fix_xrange.pytsfixes/fix_xreadlines.pyc000064400000002216147204472210011407 0ustar00 {fc@sCdZddlmZddlmZdejfdYZdS(spFix "for x in f.xreadlines()" -> "for x in f". This fixer will also convert g(f.xreadlines) into g(f.__iter__).i(t fixer_base(tNamet FixXreadlinescBseZeZdZdZRS(s power< call=any+ trailer< '.' 'xreadlines' > trailer< '(' ')' > > | power< any+ trailer< '.' no_call='xreadlines' > > cCsb|jd}|r4|jtdd|jn*|jg|dD]}|j^qEdS(Ntno_callu__iter__tprefixtcall(tgettreplaceRRtclone(tselftnodetresultsRtx((s4/usr/lib64/python2.7/lib2to3/fixes/fix_xreadlines.pyt transforms(t__name__t __module__tTruet BM_compatibletPATTERNR (((s4/usr/lib64/python2.7/lib2to3/fixes/fix_xreadlines.pyR sN(t__doc__tRt fixer_utilRtBaseFixR(((s4/usr/lib64/python2.7/lib2to3/fixes/fix_xreadlines.pytsfixes/fix_xreadlines.pyo000064400000002216147204472210011423 0ustar00 {fc@sCdZddlmZddlmZdejfdYZdS(spFix "for x in f.xreadlines()" -> "for x in f". This fixer will also convert g(f.xreadlines) into g(f.__iter__).i(t fixer_base(tNamet FixXreadlinescBseZeZdZdZRS(s power< call=any+ trailer< '.' 'xreadlines' > trailer< '(' ')' > > | power< any+ trailer< '.' no_call='xreadlines' > > cCsb|jd}|r4|jtdd|jn*|jg|dD]}|j^qEdS(Ntno_callu__iter__tprefixtcall(tgettreplaceRRtclone(tselftnodetresultsRtx((s4/usr/lib64/python2.7/lib2to3/fixes/fix_xreadlines.pyt transforms(t__name__t __module__tTruet BM_compatibletPATTERNR (((s4/usr/lib64/python2.7/lib2to3/fixes/fix_xreadlines.pyR sN(t__doc__tRt fixer_utilRtBaseFixR(((s4/usr/lib64/python2.7/lib2to3/fixes/fix_xreadlines.pytsfixes/fix_zip.pyc000064400000002520147204472210010051 0ustar00 {fc@sOdZddlmZddlmZmZmZdejfdYZdS(s7 Fixer that changes zip(seq0, seq1, ...) into list(zip(seq0, seq1, ...) unless there exists a 'from future_builtins import zip' statement in the top-level namespace. We avoid the transformation if the zip() call is directly contained in iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or for V in <>:. 
fixes/fix_apply.py000064400000004601147204472210010233 0ustar00# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Fixer for apply().

This converts apply(func, v, k) into (func)(*v, **k)."""

# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Call, Comma, parenthesize

class FixApply(fixer_base.BaseFix):
    BM_compatible = True

    PATTERN = """
    power< 'apply'
        trailer<
            '('
            arglist<
                (not argument<NAME '=' any>) func=any ','
                (not argument<NAME '=' any>) args=any [','
                (not argument<NAME '=' any>) kwds=any] [',']
            >
            ')'
        >
    >
    """

    def transform(self, node, results):
        syms = self.syms
        assert results
        func = results["func"]
        args = results["args"]
        kwds = results.get("kwds")
        # I feel like we should be able to express this logic in the
        # PATTERN above but I don't know how to do it so...
        if args:
            if args.type == self.syms.star_expr:
                return  # Make no change.
            if (args.type == self.syms.argument and
                args.children[0].value == '**'):
                return  # Make no change.
        if kwds and (kwds.type == self.syms.argument and
                     kwds.children[0].value == '**'):
            return  # Make no change.

        prefix = node.prefix
        func = func.clone()
        if (func.type not in (token.NAME, syms.atom) and
            (func.type != syms.power or
             func.children[-2].type == token.DOUBLESTAR)):
            # Need to parenthesize
            func = parenthesize(func)
        func.prefix = ""
        args = args.clone()
        args.prefix = ""
        if kwds is not None:
            kwds = kwds.clone()
            kwds.prefix = ""
        l_newargs = [pytree.Leaf(token.STAR, u"*"), args]
        if kwds is not None:
            l_newargs.extend([Comma(),
                              pytree.Leaf(token.DOUBLESTAR, u"**"),
                              kwds])
            l_newargs[-2].prefix = u" "  # that's the ** token
        # XXX Sometimes we could be cleverer, e.g. apply(f, (x, y) + t)
        # can be translated into f(x, y, *t) instead of f(*(x, y) + t)
        #new = pytree.Node(syms.power, (func, ArgList(l_newargs)))
        return Call(func, l_newargs, prefix=prefix)
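# Editor's example (not part of the original archive): a minimal sketch of
# running this fixer through lib2to3's refactoring engine. Assumes lib2to3
# is importable; the variable and file names are illustrative only.
from lib2to3.refactor import RefactoringTool

apply_tool = RefactoringTool(["lib2to3.fixes.fix_apply"])
# apply(f, args, kwds) becomes f(*args, **kwds):
print(apply_tool.refactor_string(u"apply(f, args, kwds)\n", "<demo>"))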
[fixes/fix_asserts.pyo: compiled bytecode removed]
fixes/fix_basestring.py000064400000000501147204472210011242 0ustar00"""Fixer for basestring -> str."""
# Author: Christian Heimes

# Local imports
from .. import fixer_base
from ..fixer_util import Name

class FixBasestring(fixer_base.BaseFix):
    BM_compatible = True

    PATTERN = "'basestring'"

    def transform(self, node, results):
        return Name(u"str", prefix=node.prefix)
[fixes/fix_basestring.pyo: compiled bytecode removed]
fixes/fix_buffer.py000064400000001117147204472210010356 0ustar00# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Fixer that changes buffer(...) into memoryview(...)."""

# Local imports
from .. import fixer_base
from ..fixer_util import Name

class FixBuffer(fixer_base.BaseFix):
    BM_compatible = True

    explicit = True  # The user must ask for this fixer

    PATTERN = """
              power< name='buffer' trailer< '(' [any] ')' > any* >
              """

    def transform(self, node, results):
        name = results["name"]
        name.replace(Name(u"memoryview", prefix=name.prefix))
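# Editor's example (not from the archive): what the rewritten call does.
# memoryview() exists on both 2.7 and 3.x, which is why the rename is safe.
data = bytearray(b"spam")
view = memoryview(data)      # fixer output for: buffer(data)
print(view[1:3].tobytes())   # -> the bytes 'pa'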
[fixes/fix_buffer.pyo: compiled bytecode removed]
fixes/fix_dict.py000064400000007360147204472210010036 0ustar00# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Fixer for dict methods.

d.keys() -> list(d.keys())
d.items() -> list(d.items())
d.values() -> list(d.values())

d.iterkeys() -> iter(d.keys())
d.iteritems() -> iter(d.items())
d.itervalues() -> iter(d.values())

d.viewkeys() -> d.keys()
d.viewitems() -> d.items()
d.viewvalues() -> d.values()

Except in certain very specific contexts: the iter() can be dropped
when the context is list(), sorted(), iter() or for...in; the list()
can be dropped when the context is list() or sorted() (but not iter()
or for...in!). Special contexts that apply to both: list(), sorted(),
tuple(), set(), any(), all(), sum().

Note: iter(d.keys()) could be written as iter(d) but since the
original d.iterkeys() was also redundant we don't fix this.  And there
are (rare) contexts where it makes a difference (e.g. when passing it
as an argument to a function that introspects the argument).
"""

# Local imports
from .. import pytree
from .. import patcomp
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, LParen, RParen, ArgList, Dot
from .. import fixer_util


iter_exempt = fixer_util.consuming_calls | set(["iter"])


class FixDict(fixer_base.BaseFix):
    BM_compatible = True

    PATTERN = """
    power< head=any+
         trailer< '.' method=('keys'|'items'|'values'|
                              'iterkeys'|'iteritems'|'itervalues'|
                              'viewkeys'|'viewitems'|'viewvalues') >
         parens=trailer< '(' ')' >
         tail=any*
    >
    """

    def transform(self, node, results):
        head = results["head"]
        method = results["method"][0]  # Extract node for method name
        tail = results["tail"]
        syms = self.syms
        method_name = method.value
        isiter = method_name.startswith(u"iter")
        isview = method_name.startswith(u"view")
        if isiter or isview:
            method_name = method_name[4:]
        assert method_name in (u"keys", u"items", u"values"), repr(method)
        head = [n.clone() for n in head]
        tail = [n.clone() for n in tail]
        special = not tail and self.in_special_context(node, isiter)
        args = head + [pytree.Node(syms.trailer,
                                   [Dot(),
                                    Name(method_name,
                                         prefix=method.prefix)]),
                       results["parens"].clone()]
        new = pytree.Node(syms.power, args)
        if not (special or isview):
            new.prefix = u""
            new = Call(Name(u"iter" if isiter else u"list"), [new])
        if tail:
            new = pytree.Node(syms.power, [new] + tail)
        new.prefix = node.prefix
        return new

    P1 = "power< func=NAME trailer< '(' node=any ')' > any* >"
    p1 = patcomp.compile_pattern(P1)

    P2 = """for_stmt< 'for' any 'in' node=any ':' any* >
            | comp_for< 'for' any 'in' node=any any* >
         """
    p2 = patcomp.compile_pattern(P2)

    def in_special_context(self, node, isiter):
        if node.parent is None:
            return False
        results = {}
        if (node.parent.parent is not None and
               self.p1.match(node.parent.parent, results) and
               results["node"] is node):
            if isiter:
                # iter(d.iterkeys()) -> iter(d.keys()), etc.
                return results["func"].value in iter_exempt
            else:
                # list(d.keys()) -> list(d.keys()), etc.
                return results["func"].value in fixer_util.consuming_calls
        if not isiter:
            return False
        # for ... in d.iterkeys() -> for ... in d.keys(), etc.
        return self.p2.match(node.parent, results) and results["node"] is node
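# Editor's example (not from the archive): why the list() wrapper matters.
# Python 3 dict views are lazy, so Python 2 code that indexes the result
# would break without the wrapper this fixer inserts.
d = {"a": 1, "b": 2}
keys = list(d.keys())    # fixer output for: d.keys()
print(keys[0] in d)      # indexing the result works again -> True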
[fixes/fix_dict.pyc: compiled bytecode removed]
fixes/fix_exec.py000064400000001752147204472210010036 0ustar00# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Fixer for exec.

This converts usages of the exec statement into calls to a built-in
exec() function.

exec code in ns1, ns2 -> exec(code, ns1, ns2)
"""

# Local imports
from .. import pytree
from .. import fixer_base
from ..fixer_util import Comma, Name, Call


class FixExec(fixer_base.BaseFix):
    BM_compatible = True

    PATTERN = """
    exec_stmt< 'exec' a=any 'in' b=any [',' c=any] >
    |
    exec_stmt< 'exec' (not atom<'(' [any] ')'>) a=any >
    """

    def transform(self, node, results):
        assert results
        syms = self.syms
        a = results["a"]
        b = results.get("b")
        c = results.get("c")
        args = [a.clone()]
        args[0].prefix = ""
        if b is not None:
            args.extend([Comma(), b.clone()])
        if c is not None:
            args.extend([Comma(), c.clone()])

        return Call(Name(u"exec"), args, prefix=node.prefix)
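# Editor's example (not from the archive): the call form this fixer emits is
# already accepted by Python 2.7 (where exec is a statement that tolerates
# the tuple-like spelling), so the rewrite is backward compatible.
ns = {}
exec("x = 6 * 7", ns)    # fixer output for: exec "x = 6 * 7" in ns
print(ns["x"])           # -> 42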
[fixes/fix_exec.pyc: compiled bytecode removed]
fixes/fix_filter.py000064400000004073147204472210010376 0ustar00# Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Fixer that changes filter(F, X) into list(filter(F, X)).

We avoid the transformation if the filter() call is directly contained
in iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or
for V in <>:.

NOTE: This is still not correct if the original code was depending on
filter(F, X) to return a string if X is a string and a tuple if X is a
tuple.  That would require type inference, which we don't do.  Let
Python 2.6 figure it out.
"""

# Local imports
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, Call, ListComp, in_special_context

class FixFilter(fixer_base.ConditionalFix):
    BM_compatible = True

    PATTERN = """
    filter_lambda=power<
        'filter'
        trailer<
            '('
            arglist<
                lambdef< 'lambda'
                         (fp=NAME | vfpdef< '(' fp=NAME ')'> ) ':' xp=any
                >
                ','
                it=any
            >
            ')'
        >
    >
    |
    power<
        'filter'
        trailer< '(' arglist< none='None' ',' seq=any > ')' >
    >
    |
    power<
        'filter'
        args=trailer< '(' [any] ')' >
    >
    """

    skip_on = "future_builtins.filter"

    def transform(self, node, results):
        if self.should_skip(node):
            return

        if "filter_lambda" in results:
            new = ListComp(results.get("fp").clone(),
                           results.get("fp").clone(),
                           results.get("it").clone(),
                           results.get("xp").clone())

        elif "none" in results:
            new = ListComp(Name(u"_f"),
                           Name(u"_f"),
                           results["seq"].clone(),
                           Name(u"_f"))

        else:
            if in_special_context(node):
                return None
            new = node.clone()
            new.prefix = u""
            new = Call(Name(u"list"), [new])
        new.prefix = node.prefix
        return new
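# Editor's example (not from the archive): the wrapped form behaves the same
# on 2.7 and 3.x even though Python 3's filter() is lazy.
evens = list(filter(lambda n: n % 2 == 0, range(10)))  # fixer output
print(evens)  # -> [0, 2, 4, 6, 8]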
[fixes/fix_filter.pyo: compiled bytecode removed]
fixes/fix_funcattrs.py000064400000001205147204472210011114 0ustar00"""Fix function attribute names (f.func_x -> f.__x__)."""
# Author: Collin Winter

# Local imports
from .. import fixer_base
from ..fixer_util import Name


class FixFuncattrs(fixer_base.BaseFix):
    BM_compatible = True

    PATTERN = """
    power< any+ trailer< '.' attr=('func_closure' | 'func_doc' | 'func_globals'
                                  | 'func_name' | 'func_defaults' | 'func_code'
                                  | 'func_dict') > any* >
    """

    def transform(self, node, results):
        attr = results["attr"][0]
        attr.replace(Name((u"__%s__" % attr.value[5:]),
                          prefix=attr.prefix))
[fixes/fix_funcattrs.pyo: compiled bytecode removed]
fixes/fix_future.py000064400000001043147204472210010415 0ustar00"""Remove __future__ imports

from __future__ import foo is replaced with an empty line.
"""
# Author: Christian Heimes

# Local imports
from .. import fixer_base
from ..fixer_util import BlankLine


class FixFuture(fixer_base.BaseFix):
    BM_compatible = True

    PATTERN = """import_from< 'from' module_name="__future__" 'import' any >"""

    # This should be run last -- some things check for the import
    run_order = 10

    def transform(self, node, results):
        new = BlankLine()
        new.prefix = node.prefix
        return new
[fixes/fix_future.pyo: compiled bytecode removed]
fixes/fix_getcwdu.py000064400000000704147204472210010550 0ustar00"""
Fixer that changes os.getcwdu() to os.getcwd().
"""
# Author: Victor Stinner

# Local imports
from .. import fixer_base
from ..fixer_util import Name

class FixGetcwdu(fixer_base.BaseFix):
    BM_compatible = True

    PATTERN = """
              power< 'os' trailer< dot='.' name='getcwdu' > any* >
              """

    def transform(self, node, results):
        name = results["name"]
        name.replace(Name(u"getcwd", prefix=name.prefix))
[fixes/fix_getcwdu.pyo: compiled bytecode removed]
fixes/fix_has_key.py000064400000006233147204472210010534 0ustar00# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Fixer for has_key().

Calls to .has_key() methods are expressed in terms of the 'in'
operator:

    d.has_key(k) -> k in d

CAVEATS:
1) While the primary target of this fixer is dict.has_key(), the
   fixer will change any has_key() method call, regardless of its
   class.

2) Cases like this will not be converted:

    m = d.has_key
    if m(k):
        ...

   Only *calls* to has_key() are converted.

While it is possible to convert the above to something like

    m = d.__contains__
    if m(k):
        ...

this is currently not done.
"""

# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base
from ..fixer_util import Name, parenthesize


class FixHasKey(fixer_base.BaseFix):
    BM_compatible = True

    PATTERN = """
    anchor=power<
        before=any+
        trailer< '.' 'has_key' >
        trailer<
            '('
            ( not(arglist | argument<any '=' any>) arg=any
            | arglist<(not argument<any '=' any>) arg=any ','>
            )
            ')'
        >
        after=any*
    >
    |
    negation=not_test<
        'not'
        anchor=power<
            before=any+
            trailer< '.' 'has_key' >
            trailer<
                '('
                ( not(arglist | argument<any '=' any>) arg=any
                | arglist<(not argument<any '=' any>) arg=any ','>
                )
                ')'
            >
        >
    >
    """

    def transform(self, node, results):
        assert results
        syms = self.syms
        if (node.parent.type == syms.not_test and
            self.pattern.match(node.parent)):
            # Don't transform a node matching the first alternative of the
            # pattern when its parent matches the second alternative
            return None
        negation = results.get("negation")
        anchor = results["anchor"]
        prefix = node.prefix
        before = [n.clone() for n in results["before"]]
        arg = results["arg"].clone()
        after = results.get("after")
        if after:
            after = [n.clone() for n in after]
        if arg.type in (syms.comparison, syms.not_test, syms.and_test,
                        syms.or_test, syms.test, syms.lambdef, syms.argument):
            arg = parenthesize(arg)
        if len(before) == 1:
            before = before[0]
        else:
            before = pytree.Node(syms.power, before)
        before.prefix = u" "
        n_op = Name(u"in", prefix=u" ")
        if negation:
            n_not = Name(u"not", prefix=u" ")
            n_op = pytree.Node(syms.comp_op, (n_not, n_op))
        new = pytree.Node(syms.comparison, (arg, n_op, before))
        if after:
            new = parenthesize(new)
            new = pytree.Node(syms.power, (new,) + tuple(after))
        if node.parent.type in (syms.comparison, syms.expr, syms.xor_expr,
                                syms.and_expr, syms.shift_expr,
                                syms.arith_expr, syms.term,
                                syms.factor, syms.power):
            new = parenthesize(new)
        new.prefix = prefix
        return new
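# Editor's example (not from the archive): the 'in' spelling this fixer
# produces; on 2.7 both forms are equivalent, on 3.x only 'in' survives.
d = {"k": 1}
print("k" in d)             # fixer output for: d.has_key("k")
print("missing" not in d)   # fixer output for: not d.has_key("missing")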
[fixes/fix_import.pyo: compiled bytecode removed]
fixes/fix_imports.py000064400000013075147204472210010610 0ustar00"""Fix incompatible imports and module references."""
# Authors: Collin Winter, Nick Edds

# Local imports
from .. import fixer_base
from ..fixer_util import Name, attr_chain

MAPPING = {'StringIO':  'io',
           'cStringIO': 'io',
           'cPickle': 'pickle',
           '__builtin__' : 'builtins',
           'copy_reg': 'copyreg',
           'Queue': 'queue',
           'SocketServer': 'socketserver',
           'ConfigParser': 'configparser',
           'repr': 'reprlib',
           'FileDialog': 'tkinter.filedialog',
           'tkFileDialog': 'tkinter.filedialog',
           'SimpleDialog': 'tkinter.simpledialog',
           'tkSimpleDialog': 'tkinter.simpledialog',
           'tkColorChooser': 'tkinter.colorchooser',
           'tkCommonDialog': 'tkinter.commondialog',
           'Dialog': 'tkinter.dialog',
           'Tkdnd': 'tkinter.dnd',
           'tkFont': 'tkinter.font',
           'tkMessageBox': 'tkinter.messagebox',
           'ScrolledText': 'tkinter.scrolledtext',
           'Tkconstants': 'tkinter.constants',
           'Tix': 'tkinter.tix',
           'ttk': 'tkinter.ttk',
           'Tkinter': 'tkinter',
           'markupbase': '_markupbase',
           '_winreg': 'winreg',
           'thread': '_thread',
           'dummy_thread': '_dummy_thread',
           # anydbm and whichdb are handled by fix_imports2
           'dbhash': 'dbm.bsd',
           'dumbdbm': 'dbm.dumb',
           'dbm': 'dbm.ndbm',
           'gdbm': 'dbm.gnu',
           'xmlrpclib': 'xmlrpc.client',
           'DocXMLRPCServer': 'xmlrpc.server',
           'SimpleXMLRPCServer': 'xmlrpc.server',
           'httplib': 'http.client',
           'htmlentitydefs' : 'html.entities',
           'HTMLParser' : 'html.parser',
           'Cookie': 'http.cookies',
           'cookielib': 'http.cookiejar',
           'BaseHTTPServer': 'http.server',
           'SimpleHTTPServer': 'http.server',
           'CGIHTTPServer': 'http.server',
           #'test.test_support': 'test.support',
           'commands': 'subprocess',
           'UserString' : 'collections',
           'UserList' : 'collections',
           'urlparse' : 'urllib.parse',
           'robotparser' : 'urllib.robotparser',
}


def alternates(members):
    return "(" + "|".join(map(repr, members)) + ")"


def build_pattern(mapping=MAPPING):
    mod_list = ' | '.join(["module_name='%s'" % key for key in mapping])
    bare_names = alternates(mapping.keys())

    yield """name_import=import_name< 'import' ((%s) |
               multiple_imports=dotted_as_names< any* (%s) any* >) >
          """ % (mod_list, mod_list)
    yield """import_from< 'from' (%s) 'import' ['(']
              ( any | import_as_name< any 'as' any > |
                import_as_names< any* >)  [')'] >
          """ % mod_list
    yield """import_name< 'import' (dotted_as_name< (%s) 'as' any > |
               multiple_imports=dotted_as_names<
                 any* dotted_as_name< (%s) 'as' any > any* >) >
          """ % (mod_list, mod_list)

    # Find usages of module members in code e.g. thread.foo(bar)
    yield "power< bare_with_attr=(%s) trailer<'.' any > any* >" % bare_names


class FixImports(fixer_base.BaseFix):

    BM_compatible = True
    keep_line_order = True
    # This is overridden in fix_imports2.
    mapping = MAPPING

    # We want to run this fixer late, so fix_import doesn't try to make stdlib
    # renames into relative imports.
    run_order = 6

    def build_pattern(self):
        return "|".join(build_pattern(self.mapping))

    def compile_pattern(self):
        # We override this, so MAPPING can be pragmatically altered and the
        # changes will be reflected in PATTERN.
        self.PATTERN = self.build_pattern()
        super(FixImports, self).compile_pattern()

    # Don't match the node if it's within another match.
    def match(self, node):
        match = super(FixImports, self).match
        results = match(node)
        if results:
            # Module usage could be in the trailer of an attribute lookup, so
            # we might have nested matches when "bare_with_attr" is present.
            if "bare_with_attr" not in results and \
                    any(match(obj) for obj in attr_chain(node, "parent")):
                return False
            return results
        return False

    def start_tree(self, tree, filename):
        super(FixImports, self).start_tree(tree, filename)
        self.replace = {}

    def transform(self, node, results):
        import_mod = results.get("module_name")
        if import_mod:
            mod_name = import_mod.value
            new_name = unicode(self.mapping[mod_name])
            import_mod.replace(Name(new_name, prefix=import_mod.prefix))
            if "name_import" in results:
                # If it's not a "from x import x, y" or "import x as y" import,
                # mark its usage to be replaced.
                self.replace[mod_name] = new_name
            if "multiple_imports" in results:
                # This is a nasty hack to fix multiple imports on a line (e.g.,
                # "import StringIO, urlparse"). The problem is that I can't
                # figure out an easy way to make a pattern recognize the keys
                # of MAPPING randomly sprinkled in an import statement.
                results = self.match(node)
                if results:
                    self.transform(node, results)
        else:
            # Replace usage of the module.
            bare_name = results["bare_with_attr"][0]
            new_name = self.replace.get(bare_name.value)
            if new_name:
                bare_name.replace(Name(new_name, prefix=bare_name.prefix))
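# Editor's example (not from the archive): MAPPING drives both the match
# patterns and the renaming; a few lookups show the shape of the table.
# Assumes lib2to3 is importable.
from lib2to3.fixes.fix_imports import MAPPING
for old in ("StringIO", "Queue", "urlparse"):
    print("%s -> %s" % (old, MAPPING[old]))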
fixes/fix_imports2.py000064400000000441147204472210010663 0ustar00"""Fix incompatible imports and module references that must be fixed after
fix_imports."""

from . import fix_imports


MAPPING = {
            'whichdb': 'dbm',
            'anydbm': 'dbm',
          }


class FixImports2(fix_imports.FixImports):

    run_order = 7

    mapping = MAPPING
[fixes/fix_imports2.pyo: compiled bytecode removed]
fixes/fix_input.py000064400000001306147204472210010244 0ustar00"""Fixer that changes input(...) into eval(input(...))."""
# Author: Andre Roberge

# Local imports
from .. import fixer_base
from ..fixer_util import Call, Name
from .. import patcomp


context = patcomp.compile_pattern("power< 'eval' trailer< '(' any ')' > >")


class FixInput(fixer_base.BaseFix):
    BM_compatible = True
    PATTERN = """
              power< 'input' args=trailer< '(' [any] ')' > >
              """

    def transform(self, node, results):
        # If we're already wrapped in an eval() call, we're done.
        if context.match(node.parent.parent):
            return

        new = node.clone()
        new.prefix = u""
        return Call(Name(u"eval"), [new], prefix=node.prefix)
[fixes/fix_input.pyo: compiled bytecode removed]
fixes/fix_intern.py000064400000003512147204472210010405 0ustar00# Copyright 2006 Georg Brandl.
# Licensed to PSF under a Contributor Agreement.

"""Fixer for intern().

intern(s) -> sys.intern(s)"""

# Local imports
from .. import pytree
from .. import fixer_base
from ..fixer_util import Name, Attr, touch_import


class FixIntern(fixer_base.BaseFix):
    BM_compatible = True
    order = "pre"

    PATTERN = """
    power< 'intern'
           trailer< lpar='('
                    ( not(arglist | argument<any '=' any>) obj=any
                      | obj=arglist<(not argument<any '=' any>) any ','> )
                    rpar=')' >
           after=any*
    >
    """

    def transform(self, node, results):
        if results:
            # I feel like we should be able to express this logic in the
            # PATTERN above but I don't know how to do it so...
            obj = results['obj']
            if obj:
                if obj.type == self.syms.star_expr:
                    return  # Make no change.
                if (obj.type == self.syms.argument and
                    obj.children[0].value == '**'):
                    return  # Make no change.
        syms = self.syms
        obj = results["obj"].clone()
        if obj.type == syms.arglist:
            newarglist = obj.clone()
        else:
            newarglist = pytree.Node(syms.arglist, [obj.clone()])
        after = results["after"]
        if after:
            after = [n.clone() for n in after]
        new = pytree.Node(syms.power,
                          Attr(Name(u"sys"), Name(u"intern")) +
                          [pytree.Node(syms.trailer,
                                       [results["lpar"].clone(),
                                        newarglist,
                                        results["rpar"].clone()])] + after)
        new.prefix = node.prefix
        touch_import(None, u'sys', node)
        return new
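# Editor's example (not from the archive): the rewrite target under Python 3,
# where intern() lives in sys; touch_import() above is what adds the needed
# 'import sys' to fixed files. This snippet assumes a Python 3 interpreter.
import sys
a = sys.intern("interned string")       # fixer output for: intern(...)
b = sys.intern("interned" + " string")
print(a is b)  # -> True, interned strings are shared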
[fixes/fix_intern.pyo: compiled bytecode removed]
fixes/fix_isinstance.py000064400000003111147204472210011243 0ustar00# Copyright 2008 Armin Ronacher.
# Licensed to PSF under a Contributor Agreement.

"""Fixer that cleans up a tuple argument to isinstance after the tokens
in it were fixed.  This is mainly used to remove double occurrences of
tokens as a leftover of the long -> int / unicode -> str conversion.

eg.  isinstance(x, (int, long)) -> isinstance(x, (int, int))
       -> isinstance(x, int)
"""

from .. import fixer_base
from ..fixer_util import token


class FixIsinstance(fixer_base.BaseFix):
    BM_compatible = True
    PATTERN = """
    power<
        'isinstance'
        trailer< '(' arglist< any ',' atom< '('
            args=testlist_gexp< any+ >
        ')' > > ')' >
    >
    """

    run_order = 6

    def transform(self, node, results):
        names_inserted = set()
        testlist = results["args"]
        args = testlist.children
        new_args = []
        iterator = enumerate(args)
        for idx, arg in iterator:
            if arg.type == token.NAME and arg.value in names_inserted:
                if idx < len(args) - 1 and args[idx + 1].type == token.COMMA:
                    iterator.next()
                    continue
            else:
                new_args.append(arg)
                if arg.type == token.NAME:
                    names_inserted.add(arg.value)
        if new_args and new_args[-1].type == token.COMMA:
            del new_args[-1]
        if len(new_args) == 1:
            atom = testlist.parent
            new_args[0].prefix = atom.prefix
            atom.replace(new_args[0])
        else:
            args[:] = new_args
            node.changed()
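# Editor's example (not from the archive): the cleanup in action. After the
# long -> int fixer runs, (int, long) becomes (int, int); this fixer then
# collapses the redundant tuple to a bare int.
print(isinstance(3, (int, int)))  # redundant but still True
print(isinstance(3, int))         # what the fixer leaves behind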
fixes/fix_itertools.py000064400000003016147204472210011131 0ustar00""" Fixer for itertools.(imap|ifilter|izip) --> (map|filter|zip) and
    itertools.ifilterfalse --> itertools.filterfalse (bugs 2360-2363)

    imports from itertools are fixed in fix_itertools_import.py

    If itertools is imported as something else (ie: import itertools as it;
    it.izip(spam, eggs)) method calls will not get fixed.
    """

# Local imports
from .. import fixer_base
from ..fixer_util import Name

class FixItertools(fixer_base.BaseFix):
    BM_compatible = True
    it_funcs = "('imap'|'ifilter'|'izip'|'izip_longest'|'ifilterfalse')"
    PATTERN = """
              power< it='itertools'
                  trailer<
                     dot='.' func=%(it_funcs)s > trailer< '(' [any] ')' > >
              |
              power< func=%(it_funcs)s trailer< '(' [any] ')' > >
              """ %(locals())

    # Needs to be run after fix_(map|zip|filter)
    run_order = 6

    def transform(self, node, results):
        prefix = None
        func = results['func'][0]
        if ('it' in results and
            func.value not in (u'ifilterfalse', u'izip_longest')):
            dot, it = (results['dot'], results['it'])
            # Remove the 'itertools'
            prefix = it.prefix
            it.remove()
            # Replace the node which contains ('.', 'function') with the
            # function (to be consistent with the second part of the pattern)
            dot.remove()
            func.parent.replace(func)

        prefix = prefix or func.prefix
        func.replace(Name(func.value[1:], prefix=prefix))
fixes/fix_itertools_imports.py000064400000004056147204472210012713 0ustar00""" Fixer for imports of itertools.(imap|ifilter|izip|ifilterfalse) """

# Local imports
from lib2to3 import fixer_base
from lib2to3.fixer_util import BlankLine, syms, token


class FixItertoolsImports(fixer_base.BaseFix):
    BM_compatible = True
    PATTERN = """
              import_from< 'from' 'itertools' 'import' imports=any >
              """ %(locals())

    def transform(self, node, results):
        imports = results['imports']
        if imports.type == syms.import_as_name or not imports.children:
            children = [imports]
        else:
            children = imports.children
        for child in children[::2]:
            if child.type == token.NAME:
                member = child.value
                name_node = child
            elif child.type == token.STAR:
                # Just leave the import as is.
                return
            else:
                assert child.type == syms.import_as_name
                name_node = child.children[0]
            member_name = name_node.value
            if member_name in (u'imap', u'izip', u'ifilter'):
                child.value = None
                child.remove()
            elif member_name in (u'ifilterfalse', u'izip_longest'):
                node.changed()
                name_node.value = (u'filterfalse' if member_name[1] == u'f'
                                   else u'zip_longest')

        # Make sure the import statement is still sane
        children = imports.children[:] or [imports]
        remove_comma = True
        for child in children:
            if remove_comma and child.type == token.COMMA:
                child.remove()
            else:
                remove_comma ^= True

        while children and children[-1].type == token.COMMA:
            children.pop().remove()

        # If there are no imports left, just get rid of the entire statement
        if (not (imports.children or getattr(imports, 'value', None)) or
            imports.parent is None):
            p = node.prefix
            node = BlankLine()
            node.prefix = p
            return node
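# Editor's example (not from the archive): the renamed functions. Python 2's
# itertools.imap/ifilter/izip are simply the builtins map/filter/zip on
# Python 3, which is what these two fixers rewrite calls and imports to.
print(list(map(abs, [-1, 2, -3])))  # was: itertools.imap(abs, ...)
print(list(zip("ab", "cd")))        # was: itertools.izip("ab", "cd")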
fixes/fix_ne.py000064400000001075147204472210007512 0ustar00# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Fixer that turns <> into !=."""

# Local imports
from .. import pytree
from ..pgen2 import token
from .. import fixer_base


class FixNe(fixer_base.BaseFix):
    # This is so simple that we don't need the pattern compiler.

    _accept_type = token.NOTEQUAL

    def match(self, node):
        # Override
        return node.value == u"<>"

    def transform(self, node, results):
        new = pytree.Leaf(token.NOTEQUAL, u"!=", prefix=node.prefix)
        return new
fixes/fix_next.py000064400000006155147204472210010072 0ustar00"""Fixer for it.next() -> next(it), per PEP 3114."""
# Author: Collin Winter

# Things that currently aren't covered:
#   - listcomp "next" names aren't warned
#   - "with" statement targets aren't checked

# Local imports
from ..pgen2 import token
from ..pygram import python_symbols as syms
from .. import fixer_base
from ..fixer_util import Name, Call, find_binding

bind_warning = "Calls to builtin next() possibly shadowed by global binding"


class FixNext(fixer_base.BaseFix):
    BM_compatible = True
    PATTERN = """
    power< base=any+ trailer< '.' attr='next' > trailer< '(' ')' > >
    |
    power< head=any+ trailer< '.' attr='next' > not trailer< '(' ')' > >
    |
    classdef< 'class' any+ ':'
              suite< any*
                     funcdef< 'def'
                              name='next'
                              parameters< '(' NAME ')' > any+ >
                     any* > >
    |
    global=global_stmt< 'global' any* 'next' any* >
    """

    order = "pre"  # Pre-order tree traversal

    def start_tree(self, tree, filename):
        super(FixNext, self).start_tree(tree, filename)

        n = find_binding(u'next', tree)
        if n:
            self.warning(n, bind_warning)
            self.shadowed_next = True
        else:
            self.shadowed_next = False

    def transform(self, node, results):
        assert results

        base = results.get("base")
        attr = results.get("attr")
        name = results.get("name")

        if base:
            if self.shadowed_next:
                attr.replace(Name(u"__next__", prefix=attr.prefix))
            else:
                base = [n.clone() for n in base]
                base[0].prefix = u""
                node.replace(Call(Name(u"next", prefix=node.prefix), base))
        elif name:
            n = Name(u"__next__", prefix=name.prefix)
            name.replace(n)
        elif attr:
            # We don't do this transformation if we're assigning to "x.next".
            # Unfortunately, it doesn't seem possible to do this in PATTERN,
            # so it's being done here.
            if is_assign_target(node):
                head = results["head"]
                if "".join([str(n) for n in head]).strip() == u'__builtin__':
                    self.warning(node, bind_warning)
                return
            attr.replace(Name(u"__next__"))
        elif "global" in results:
            self.warning(node, bind_warning)
            self.shadowed_next = True


### The following functions help test if node is part of an assignment
### target.

def is_assign_target(node):
    assign = find_assign(node)
    if assign is None:
        return False

    for child in assign.children:
        if child.type == token.EQUAL:
            return False
        elif is_subtree(child, node):
            return True
    return False

def find_assign(node):
    if node.type == syms.expr_stmt:
        return node
    if node.type == syms.simple_stmt or node.parent is None:
        return None
    return find_assign(node.parent)

def is_subtree(root, node):
    if root == node:
        return True
    return any(is_subtree(c, node) for c in root.children)
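# Editor's example (not from the archive): PEP 3114 in two lines. The
# it.next() method call becomes the next() builtin, available since 2.6.
it = iter([10, 20])
print(next(it))  # fixer output for: it.next()
print(next(it))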
import fixer_base from ..fixer_util import LParen, RParen # XXX This doesn't support nested for loops like [x for x in 1, 2 for x in 1, 2] class FixParen(fixer_base.BaseFix): BM_compatible = True PATTERN = """ atom< ('[' | '(') (listmaker< any comp_for< 'for' NAME 'in' target=testlist_safe< any (',' any)+ [','] > [any] > > | testlist_gexp< any comp_for< 'for' NAME 'in' target=testlist_safe< any (',' any)+ [','] > [any] > >) (']' | ')') > """ def transform(self, node, results): target = results["target"] lparen = LParen() lparen.prefix = target.prefix target.prefix = u"" # Make it hug the parentheses target.insert_child(0, lparen) target.append_child(RParen()) fixes/fix_print.py000064400000005461147204472210010247 0ustar00# Copyright 2006 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Fixer for print. Change: 'print' into 'print()' 'print ...' into 'print(...)' 'print ... ,' into 'print(..., end=" ")' 'print >>x, ...' into 'print(..., file=x)' No changes are applied if print_function is imported from __future__ """ # Local imports from .. import patcomp from .. import pytree from ..pgen2 import token from .. import fixer_base from ..fixer_util import Name, Call, Comma, String, is_tuple parend_expr = patcomp.compile_pattern( """atom< '(' [atom|STRING|NAME] ')' >""" ) class FixPrint(fixer_base.BaseFix): BM_compatible = True PATTERN = """ simple_stmt< any* bare='print' any* > | print_stmt """ def transform(self, node, results): assert results bare_print = results.get("bare") if bare_print: # Special-case print all by itself bare_print.replace(Call(Name(u"print"), [], prefix=bare_print.prefix)) return assert node.children[0] == Name(u"print") args = node.children[1:] if len(args) == 1 and parend_expr.match(args[0]): # We don't want to keep sticking parens around an # already-parenthesised expression. return sep = end = file = None if args and args[-1] == Comma(): args = args[:-1] end = " " if args and args[0] == pytree.Leaf(token.RIGHTSHIFT, u">>"): assert len(args) >= 2 file = args[1].clone() args = args[3:] # Strip a possible comma after the file expression # Now synthesize a print(args, sep=..., end=..., file=...) node. l_args = [arg.clone() for arg in args] if l_args: l_args[0].prefix = u"" if sep is not None or end is not None or file is not None: if sep is not None: self.add_kwarg(l_args, u"sep", String(repr(sep))) if end is not None: self.add_kwarg(l_args, u"end", String(repr(end))) if file is not None: self.add_kwarg(l_args, u"file", file) n_stmt = Call(Name(u"print"), l_args) n_stmt.prefix = node.prefix return n_stmt def add_kwarg(self, l_nodes, s_kwd, n_expr): # XXX All this prefix-setting may lose comments (though rarely) n_expr.prefix = u"" n_argument = pytree.Node(self.syms.argument, (Name(s_kwd), pytree.Leaf(token.EQUAL, u"="), n_expr)) if l_nodes: l_nodes.append(Comma()) n_argument.prefix = u" " l_nodes.append(n_argument) fixes/fix_raw_input.pyo000064400000001666147204472210011305 0ustar00 {fc@sCdZddlmZddlmZdejfdYZdS(s2Fixer that changes raw_input(...) 
into input(...).i(t fixer_base(tNamet FixRawInputcBseZeZdZdZRS(sU power< name='raw_input' trailer< '(' [any] ')' > any* > cCs*|d}|jtdd|jdS(Ntnameuinputtprefix(treplaceRR(tselftnodetresultsR((s3/usr/lib64/python2.7/lib2to3/fixes/fix_raw_input.pyt transforms (t__name__t __module__tTruet BM_compatibletPATTERNR (((s3/usr/lib64/python2.7/lib2to3/fixes/fix_raw_input.pyRsN(t__doc__tRt fixer_utilRtBaseFixR(((s3/usr/lib64/python2.7/lib2to3/fixes/fix_raw_input.pytsfixes/fix_repr.py000064400000001146147204472210010057 0ustar00# Copyright 2006 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Fixer that transforms `xyzzy` into repr(xyzzy).""" # Local imports from .. import fixer_base from ..fixer_util import Call, Name, parenthesize class FixRepr(fixer_base.BaseFix): BM_compatible = True PATTERN = """ atom < '`' expr=any '`' > """ def transform(self, node, results): expr = results["expr"].clone() if expr.type == self.syms.testlist1: expr = parenthesize(expr) return Call(Name(u"repr"), [expr], prefix=node.prefix) fixes/fix_tuple_params.py000064400000012711147204472210011603 0ustar00"""Fixer for function definitions with tuple parameters. def func(((a, b), c), d): ... -> def func(x, d): ((a, b), c) = x ... It will also support lambdas: lambda (x, y): x + y -> lambda t: t[0] + t[1] # The parens are a syntax error in Python 3 lambda (x): x + y -> lambda x: x + y """ # Author: Collin Winter # Local imports from .. import pytree from ..pgen2 import token from .. import fixer_base from ..fixer_util import Assign, Name, Newline, Number, Subscript, syms def is_docstring(stmt): return isinstance(stmt, pytree.Node) and \ stmt.children[0].type == token.STRING class FixTupleParams(fixer_base.BaseFix): run_order = 4 #use a lower order since lambda is part of other #patterns BM_compatible = True PATTERN = """ funcdef< 'def' any parameters< '(' args=any ')' > ['->' any] ':' suite=any+ > | lambda= lambdef< 'lambda' args=vfpdef< '(' inner=any ')' > ':' body=any > """ def transform(self, node, results): if "lambda" in results: return self.transform_lambda(node, results) new_lines = [] suite = results["suite"] args = results["args"] # This crap is so "def foo(...): x = 5; y = 7" is handled correctly. # TODO(cwinter): suite-cleanup if suite[0].children[1].type == token.INDENT: start = 2 indent = suite[0].children[1].value end = Newline() else: start = 0 indent = u"; " end = pytree.Leaf(token.INDENT, u"") # We need access to self for new_name(), and making this a method # doesn't feel right. Closing over self and new_lines makes the # code below cleaner. def handle_tuple(tuple_arg, add_prefix=False): n = Name(self.new_name()) arg = tuple_arg.clone() arg.prefix = u"" stmt = Assign(arg, n.clone()) if add_prefix: n.prefix = u" " tuple_arg.replace(n) new_lines.append(pytree.Node(syms.simple_stmt, [stmt, end.clone()])) if args.type == syms.tfpdef: handle_tuple(args) elif args.type == syms.typedargslist: for i, arg in enumerate(args.children): if arg.type == syms.tfpdef: # Without add_prefix, the emitted code is correct, # just ugly. handle_tuple(arg, add_prefix=(i > 0)) if not new_lines: return # This isn't strictly necessary, but it plays nicely with other fixers. 
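        # (Illustration with a hypothetical input: for "def f(((a, b), c), d)"
        # new_lines holds a single synthesized statement of the form
        # "((a, b), c) = x", where x is a fresh name; the lines below splice
        # these statements into the function's suite.)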
# TODO(cwinter) get rid of this when children becomes a smart list for line in new_lines: line.parent = suite[0] # TODO(cwinter) suite-cleanup after = start if start == 0: new_lines[0].prefix = u" " elif is_docstring(suite[0].children[start]): new_lines[0].prefix = indent after = start + 1 for line in new_lines: line.parent = suite[0] suite[0].children[after:after] = new_lines for i in range(after+1, after+len(new_lines)+1): suite[0].children[i].prefix = indent suite[0].changed() def transform_lambda(self, node, results): args = results["args"] body = results["body"] inner = simplify_args(results["inner"]) # Replace lambda ((((x)))): x with lambda x: x if inner.type == token.NAME: inner = inner.clone() inner.prefix = u" " args.replace(inner) return params = find_params(args) to_index = map_to_index(params) tup_name = self.new_name(tuple_name(params)) new_param = Name(tup_name, prefix=u" ") args.replace(new_param.clone()) for n in body.post_order(): if n.type == token.NAME and n.value in to_index: subscripts = [c.clone() for c in to_index[n.value]] new = pytree.Node(syms.power, [new_param.clone()] + subscripts) new.prefix = n.prefix n.replace(new) ### Helper functions for transform_lambda() def simplify_args(node): if node.type in (syms.vfplist, token.NAME): return node elif node.type == syms.vfpdef: # These look like vfpdef< '(' x ')' > where x is NAME # or another vfpdef instance (leading to recursion). while node.type == syms.vfpdef: node = node.children[1] return node raise RuntimeError("Received unexpected node %s" % node) def find_params(node): if node.type == syms.vfpdef: return find_params(node.children[1]) elif node.type == token.NAME: return node.value return [find_params(c) for c in node.children if c.type != token.COMMA] def map_to_index(param_list, prefix=[], d=None): if d is None: d = {} for i, obj in enumerate(param_list): trailer = [Subscript(Number(unicode(i)))] if isinstance(obj, list): map_to_index(obj, trailer, d=d) else: d[obj] = prefix + trailer return d def tuple_name(param_list): l = [] for obj in param_list: if isinstance(obj, list): l.append(tuple_name(obj)) else: l.append(obj) return u"_".join(l) fixes/fix_tuple_params.pyo000064400000012522147204472210011762 0ustar00 {fc@sdZddlmZddlmZddlmZddlmZmZm Z m Z m Z m Z dZ dejfdYZd Zd Zgd d Zd Zd S(s:Fixer for function definitions with tuple parameters. def func(((a, b), c), d): ... -> def func(x, d): ((a, b), c) = x ... 
It will also support lambdas: lambda (x, y): x + y -> lambda t: t[0] + t[1] # The parens are a syntax error in Python 3 lambda (x): x + y -> lambda x: x + y i(tpytree(ttoken(t fixer_base(tAssigntNametNewlinetNumbert SubscripttsymscCs)t|tjo(|jdjtjkS(Ni(t isinstanceRtNodetchildrenttypeRtSTRING(tstmt((s6/usr/lib64/python2.7/lib2to3/fixes/fix_tuple_params.pyt is_docstringstFixTupleParamscBs,eZdZeZdZdZdZRS(is funcdef< 'def' any parameters< '(' args=any ')' > ['->' any] ':' suite=any+ > | lambda= lambdef< 'lambda' args=vfpdef< '(' inner=any ')' > ':' body=any > c s0d|krj||Sg|d}|d}|djdjtjkryd}|djdj}tn!d}d}tjtjdt fd }|jt j kr||n`|jt j kr1xKt |jD]7\}} | jt j kr|| d |dkqqWns;dSxD]} |d| _qBW|} |dkr{d d_n1t|dj|r|d_|d} nxD]} |d| _qW|dj| | +x=t| d| tdD]}||dj|_qW|djdS( Ntlambdatsuitetargsiiiu; ucstj}|j}d|_t||j}|rNd|_n|j|jtjt j |jgdS(Nuu ( Rtnew_nametclonetprefixRtreplacetappendRR Rt simple_stmt(t tuple_argt add_prefixtntargR(tendt new_linestself(s6/usr/lib64/python2.7/lib2to3/fixes/fix_tuple_params.pyt handle_tupleCs    Ru (ttransform_lambdaR R RtINDENTtvalueRRtLeaftFalseRttfpdeft typedargslistt enumeratetparentRRtrangetlentchanged( R tnodetresultsRRtstarttindentR!tiRtlinetafter((RRR s6/usr/lib64/python2.7/lib2to3/fixes/fix_tuple_params.pyt transform.sF            (cCsN|d}|d}t|d}|jtjkr\|j}d|_|j|dSt|}t|}|j t |}t |dd} |j| jx|j D]} | jtjkr| j |krg|| j D]} | j^q} tjtj| jg| } | j| _| j| qqWdS(NRtbodytinneru R(t simplify_argsR RtNAMERRRt find_paramst map_to_indexRt tuple_nameRt post_orderR$RR Rtpower(R R.R/RR6R7tparamstto_indexttup_namet new_paramRtct subscriptstnew((s6/usr/lib64/python2.7/lib2to3/fixes/fix_tuple_params.pyR"ns(       !&  (t__name__t __module__t run_ordertTruet BM_compatibletPATTERNR5R"(((s6/usr/lib64/python2.7/lib2to3/fixes/fix_tuple_params.pyRs   @cCso|jtjtjfkr|S|jtjkr[x#|jtjkrV|jd}q4W|Std|dS(NisReceived unexpected node %s(R RtvfplistRR9tvfpdefR t RuntimeError(R.((s6/usr/lib64/python2.7/lib2to3/fixes/fix_tuple_params.pyR8scCsn|jtjkr#t|jdS|jtjkr<|jSg|jD]$}|jtjkrFt|^qFS(Ni( R RRMR:R RR9R$tCOMMA(R.RC((s6/usr/lib64/python2.7/lib2to3/fixes/fix_tuple_params.pyR:s cCs|dkri}nxht|D]Z\}}ttt|g}t|trnt||d|q"||||s. l  fixes/fix_types.py000064400000003421147204472210010251 0ustar00# Copyright 2007 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Fixer for removing uses of the types module. These work for only the known names in the types module. The forms above can include types. or not. ie, It is assumed the module is imported either as: import types from types import ... # either * or specific types The import statements are not modified. There should be another fixer that handles at least the following constants: type([]) -> list type(()) -> tuple type('') -> str """ # Local imports from ..pgen2 import token from .. import fixer_base from ..fixer_util import Name _TYPE_MAPPING = { 'BooleanType' : 'bool', 'BufferType' : 'memoryview', 'ClassType' : 'type', 'ComplexType' : 'complex', 'DictType': 'dict', 'DictionaryType' : 'dict', 'EllipsisType' : 'type(Ellipsis)', #'FileType' : 'io.IOBase', 'FloatType': 'float', 'IntType': 'int', 'ListType': 'list', 'LongType': 'int', 'ObjectType' : 'object', 'NoneType': 'type(None)', 'NotImplementedType' : 'type(NotImplemented)', 'SliceType' : 'slice', 'StringType': 'bytes', # XXX ? 'StringTypes' : '(str,)', # XXX ? 'TupleType': 'tuple', 'TypeType' : 'type', 'UnicodeType': 'str', 'XRangeType' : 'range', } _pats = ["power< 'types' trailer< '.' 
name='%s' > >" % t for t in _TYPE_MAPPING] class FixTypes(fixer_base.BaseFix): BM_compatible = True PATTERN = '|'.join(_pats) def transform(self, node, results): new_value = unicode(_TYPE_MAPPING.get(results["name"].value)) if new_value: return Name(new_value, prefix=node.prefix) return None fixes/fix_types.pyo000064400000004242147204472210010432 0ustar00 {fc@s dZddlmZddlmZddlmZidd6dd6d d 6d d 6d d6d d6dd6dd6dd6dd6dd6dd6dd6dd6dd 6d!d"6d#d$6d%d&6d d'6d(d)6d*d+6ZgeD]Zd,e^qZ d-ej fd.YZ d/S(0sFixer for removing uses of the types module. These work for only the known names in the types module. The forms above can include types. or not. ie, It is assumed the module is imported either as: import types from types import ... # either * or specific types The import statements are not modified. There should be another fixer that handles at least the following constants: type([]) -> list type(()) -> tuple type('') -> str i(ttoken(t fixer_base(tNametboolt BooleanTypet memoryviewt BufferTypettypet ClassTypetcomplext ComplexTypetdicttDictTypetDictionaryTypestype(Ellipsis)t EllipsisTypetfloatt FloatTypetinttIntTypetlisttListTypetLongTypetobjectt ObjectTypes type(None)tNoneTypestype(NotImplemented)tNotImplementedTypetslicet SliceTypetbytest StringTypes(str,)t StringTypesttuplet TupleTypetTypeTypetstrt UnicodeTypetranget XRangeTypes)power< 'types' trailer< '.' name='%s' > >tFixTypescBs&eZeZdjeZdZRS(t|cCs9ttj|dj}|r5t|d|jSdS(Ntnametprefix(tunicodet _TYPE_MAPPINGtgettvalueRR)tNone(tselftnodetresultst new_value((s//usr/lib64/python2.7/lib2to3/fixes/fix_types.pyt transform:s(t__name__t __module__tTruet BM_compatibletjoint_patstPATTERNR3(((s//usr/lib64/python2.7/lib2to3/fixes/fix_types.pyR&6sN( t__doc__tpgen2RtRt fixer_utilRR+ttR9tBaseFixR&(((s//usr/lib64/python2.7/lib2to3/fixes/fix_types.pyts6 fixes/fix_unicode.py000064400000002365147204472210010541 0ustar00r"""Fixer for unicode. * Changes unicode to str and unichr to chr. * If "...\u..." is not unicode literal change it into "...\\u...". * Change u"..." into "...". """ from ..pgen2 import token from .. import fixer_base _mapping = {u"unichr" : u"chr", u"unicode" : u"str"} class FixUnicode(fixer_base.BaseFix): BM_compatible = True PATTERN = "STRING | 'unicode' | 'unichr'" def start_tree(self, tree, filename): super(FixUnicode, self).start_tree(tree, filename) self.unicode_literals = 'unicode_literals' in tree.future_features def transform(self, node, results): if node.type == token.NAME: new = node.clone() new.value = _mapping[node.value] return new elif node.type == token.STRING: val = node.value if not self.unicode_literals and val[0] in u'\'"' and u'\\' in val: val = ur'\\'.join([ v.replace(u'\\u', ur'\\u').replace(u'\\U', ur'\\U') for v in val.split(ur'\\') ]) if val[0] in u'uU': val = val[1:] if val == node.value: return node new = node.clone() new.value = val return new fixes/fix_urllib.py000064400000020301147204472210010372 0ustar00"""Fix changes imports of urllib which are now incompatible. This is rather similar to fix_imports, but because of the more complex nature of the fixing for urllib, it has its own fixer. 
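
   For example (illustrative only; the full mapping lives in MAPPING below):

      import urllib                ->  import urllib.request, urllib.parse, urllib.error
      from urllib2 import urlopen  ->  from urllib.request import urlopen
      urllib.quote(s)              ->  urllib.parse.quote(s)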
""" # Author: Nick Edds # Local imports from lib2to3.fixes.fix_imports import alternates, FixImports from lib2to3 import fixer_base from lib2to3.fixer_util import (Name, Comma, FromImport, Newline, find_indentation, Node, syms) MAPPING = {"urllib": [ ("urllib.request", ["URLopener", "FancyURLopener", "urlretrieve", "_urlopener", "urlopen", "urlcleanup", "pathname2url", "url2pathname"]), ("urllib.parse", ["quote", "quote_plus", "unquote", "unquote_plus", "urlencode", "splitattr", "splithost", "splitnport", "splitpasswd", "splitport", "splitquery", "splittag", "splittype", "splituser", "splitvalue", ]), ("urllib.error", ["ContentTooShortError"])], "urllib2" : [ ("urllib.request", ["urlopen", "install_opener", "build_opener", "Request", "OpenerDirector", "BaseHandler", "HTTPDefaultErrorHandler", "HTTPRedirectHandler", "HTTPCookieProcessor", "ProxyHandler", "HTTPPasswordMgr", "HTTPPasswordMgrWithDefaultRealm", "AbstractBasicAuthHandler", "HTTPBasicAuthHandler", "ProxyBasicAuthHandler", "AbstractDigestAuthHandler", "HTTPDigestAuthHandler", "ProxyDigestAuthHandler", "HTTPHandler", "HTTPSHandler", "FileHandler", "FTPHandler", "CacheFTPHandler", "UnknownHandler"]), ("urllib.error", ["URLError", "HTTPError"]), ] } # Duplicate the url parsing functions for urllib2. MAPPING["urllib2"].append(MAPPING["urllib"][1]) def build_pattern(): bare = set() for old_module, changes in MAPPING.items(): for change in changes: new_module, members = change members = alternates(members) yield """import_name< 'import' (module=%r | dotted_as_names< any* module=%r any* >) > """ % (old_module, old_module) yield """import_from< 'from' mod_member=%r 'import' ( member=%s | import_as_name< member=%s 'as' any > | import_as_names< members=any* >) > """ % (old_module, members, members) yield """import_from< 'from' module_star=%r 'import' star='*' > """ % old_module yield """import_name< 'import' dotted_as_name< module_as=%r 'as' any > > """ % old_module # bare_with_attr has a special significance for FixImports.match(). yield """power< bare_with_attr=%r trailer< '.' member=%s > any* > """ % (old_module, members) class FixUrllib(FixImports): def build_pattern(self): return "|".join(build_pattern()) def transform_import(self, node, results): """Transform for the basic import case. Replaces the old import name with a comma separated list of its replacements. """ import_mod = results.get("module") pref = import_mod.prefix names = [] # create a Node list of the replacement modules for name in MAPPING[import_mod.value][:-1]: names.extend([Name(name[0], prefix=pref), Comma()]) names.append(Name(MAPPING[import_mod.value][-1][0], prefix=pref)) import_mod.replace(names) def transform_member(self, node, results): """Transform for imports of specific module elements. Replaces the module to be imported from with the appropriate new module. 
""" mod_member = results.get("mod_member") pref = mod_member.prefix member = results.get("member") # Simple case with only a single member being imported if member: # this may be a list of length one, or just a node if isinstance(member, list): member = member[0] new_name = None for change in MAPPING[mod_member.value]: if member.value in change[1]: new_name = change[0] break if new_name: mod_member.replace(Name(new_name, prefix=pref)) else: self.cannot_convert(node, "This is an invalid module element") # Multiple members being imported else: # a dictionary for replacements, order matters modules = [] mod_dict = {} members = results["members"] for member in members: # we only care about the actual members if member.type == syms.import_as_name: as_name = member.children[2].value member_name = member.children[0].value else: member_name = member.value as_name = None if member_name != u",": for change in MAPPING[mod_member.value]: if member_name in change[1]: if change[0] not in mod_dict: modules.append(change[0]) mod_dict.setdefault(change[0], []).append(member) new_nodes = [] indentation = find_indentation(node) first = True def handle_name(name, prefix): if name.type == syms.import_as_name: kids = [Name(name.children[0].value, prefix=prefix), name.children[1].clone(), name.children[2].clone()] return [Node(syms.import_as_name, kids)] return [Name(name.value, prefix=prefix)] for module in modules: elts = mod_dict[module] names = [] for elt in elts[:-1]: names.extend(handle_name(elt, pref)) names.append(Comma()) names.extend(handle_name(elts[-1], pref)) new = FromImport(module, names) if not first or node.parent.prefix.endswith(indentation): new.prefix = indentation new_nodes.append(new) first = False if new_nodes: nodes = [] for new_node in new_nodes[:-1]: nodes.extend([new_node, Newline()]) nodes.append(new_nodes[-1]) node.replace(nodes) else: self.cannot_convert(node, "All module elements are invalid") def transform_dot(self, node, results): """Transform for calls to module members in code.""" module_dot = results.get("bare_with_attr") member = results.get("member") new_name = None if isinstance(member, list): member = member[0] for change in MAPPING[module_dot.value]: if member.value in change[1]: new_name = change[0] break if new_name: module_dot.replace(Name(new_name, prefix=module_dot.prefix)) else: self.cannot_convert(node, "This is an invalid module element") def transform(self, node, results): if results.get("module"): self.transform_import(node, results) elif results.get("mod_member"): self.transform_member(node, results) elif results.get("bare_with_attr"): self.transform_dot(node, results) # Renaming and star imports are not supported for these modules. elif results.get("module_star"): self.cannot_convert(node, "Cannot handle star imports.") elif results.get("module_as"): self.cannot_convert(node, "This module is now multiple modules") fixes/fix_urllib.pyo000064400000015770147204472210010567 0ustar00 {fc@ssdZddlmZmZddlmZddlmZmZm Z m Z m Z m Z m Z iddddd d d d d gfddddddddddddddddgfddgfgd 6dd d!d"d#d$d%d&d'd(d)d*d+d,d-d.d/d0d1d2d3d4d5d6d7gfdd8d9gfgd:6Zed:jed d;d<Zd=efd>YZd?S(@sFix changes imports of urllib which are now incompatible. This is rather similar to fix_imports, but because of the more complex nature of the fixing for urllib, it has its own fixer. 
i(t alternatest FixImports(t fixer_base(tNametCommat FromImporttNewlinetfind_indentationtNodetsymssurllib.requestt URLopenertFancyURLopenert urlretrievet _urlopenerturlopent urlcleanupt pathname2urlt url2pathnames urllib.parsetquotet quote_plustunquotet unquote_plust urlencodet splitattrt splithostt splitnportt splitpasswdt splitportt splitquerytsplittagt splittypet splitusert splitvalues urllib.errortContentTooShortErrorturllibtinstall_openert build_openertRequesttOpenerDirectort BaseHandlertHTTPDefaultErrorHandlertHTTPRedirectHandlertHTTPCookieProcessort ProxyHandlertHTTPPasswordMgrtHTTPPasswordMgrWithDefaultRealmtAbstractBasicAuthHandlertHTTPBasicAuthHandlertProxyBasicAuthHandlertAbstractDigestAuthHandlertHTTPDigestAuthHandlertProxyDigestAuthHandlert HTTPHandlert HTTPSHandlert FileHandlert FTPHandlertCacheFTPHandlertUnknownHandlertURLErrort HTTPErrorturllib2iccst}xtjD]w\}}xh|D]`}|\}}t|}d||fVd|||fVd|Vd|Vd||fVq)WqWdS(Nsimport_name< 'import' (module=%r | dotted_as_names< any* module=%r any* >) > simport_from< 'from' mod_member=%r 'import' ( member=%s | import_as_name< member=%s 'as' any > | import_as_names< members=any* >) > sIimport_from< 'from' module_star=%r 'import' star='*' > stimport_name< 'import' dotted_as_name< module_as=%r 'as' any > > sKpower< bare_with_attr=%r trailer< '.' member=%s > any* > (tsettMAPPINGtitemsR(tbaret old_moduletchangestchanget new_moduletmembers((s0/usr/lib64/python2.7/lib2to3/fixes/fix_urllib.pyt build_pattern1s      t FixUrllibcBs5eZdZdZdZdZdZRS(cCsdjtS(Nt|(tjoinRF(tself((s0/usr/lib64/python2.7/lib2to3/fixes/fix_urllib.pyRFJscCs|jd}|j}g}x?t|jd D],}|jt|dd|tgq0W|jtt|jddd||j|dS(sTransform for the basic import case. Replaces the old import name with a comma separated list of its replacements. tmoduleiitprefixN( tgetRLR>tvaluetextendRRtappendtreplace(RJtnodetresultst import_modtpreftnamestname((s0/usr/lib64/python2.7/lib2to3/fixes/fix_urllib.pyttransform_importMs *(cCs|jd}|j}|jd}|rt|trI|d}nd }x6t|jD]'}|j|dkr]|d}Pq]q]W|r|jt|d|q|j |dn/g}i} |d} x| D]}|j t j kr|j dj} |j dj} n|j} d } | d krxlt|jD]Z}| |dkr>|d| krx|j|dn| j|dgj|q>q>WqqWg} t|}t}d }x|D]}| |}g}x8|d D],}|j||||jtqW|j||d |t||}| sa|jjj|rm||_n| j|t}qW| rg}x(| d D]}|j|tgqW|j| d |j|n|j |d d S(sTransform for imports of specific module elements. Replaces the module to be imported from with the appropriate new module. 
t mod_membertmemberiiRLs!This is an invalid module elementREiu,cSsz|jtjkrdt|jdjd||jdj|jdjg}ttj|gSt|jd|gS(NiRLii(ttypeR timport_as_nameRtchildrenRNtcloneR(RWRLtkids((s0/usr/lib64/python2.7/lib2to3/fixes/fix_urllib.pyt handle_names isAll module elements are invalidN(RMRLt isinstancetlisttNoneR>RNRQRtcannot_convertR[R R\R]RPt setdefaultRtTrueRORRtparenttendswithtFalseR(RJRRRSRYRURZtnew_nameRCtmodulestmod_dictREtas_namet member_namet new_nodest indentationtfirstR`RKteltsRVtelttnewtnodestnew_node((s0/usr/lib64/python2.7/lib2to3/fixes/fix_urllib.pyttransform_member]sh       +       cCs|jd}|jd}d}t|tr@|d}nx6t|jD]'}|j|dkrN|d}PqNqNW|r|jt|d|jn|j |ddS(s.Transform for calls to module members in code.tbare_with_attrRZiiRLs!This is an invalid module elementN( RMRcRaRbR>RNRQRRLRd(RJRRRSt module_dotRZRjRC((s0/usr/lib64/python2.7/lib2to3/fixes/fix_urllib.pyt transform_dots  cCs|jdr"|j||n|jdrD|j||nf|jdrf|j||nD|jdr|j|dn"|jdr|j|dndS(NRKRYRxt module_starsCannot handle star imports.t module_ass#This module is now multiple modules(RMRXRwRzRd(RJRRRS((s0/usr/lib64/python2.7/lib2to3/fixes/fix_urllib.pyt transforms(t__name__t __module__RFRXRwRzR}(((s0/usr/lib64/python2.7/lib2to3/fixes/fix_urllib.pyRGHs    L N(t__doc__tlib2to3.fixes.fix_importsRRtlib2to3Rtlib2to3.fixer_utilRRRRRRR R>RPRFRG(((s0/usr/lib64/python2.7/lib2to3/fixes/fix_urllib.pytsD4           fixes/fix_ws_comma.py000064400000002107147204472210010712 0ustar00"""Fixer that changes 'a ,b' into 'a, b'. This also changes '{a :b}' into '{a: b}', but does not touch other uses of colons. It does not touch other uses of whitespace. """ from .. import pytree from ..pgen2 import token from .. import fixer_base class FixWsComma(fixer_base.BaseFix): explicit = True # The user must ask for this fixers PATTERN = """ any<(not(',') any)+ ',' ((not(',') any)+ ',')* [not(',') any]> """ COMMA = pytree.Leaf(token.COMMA, u",") COLON = pytree.Leaf(token.COLON, u":") SEPS = (COMMA, COLON) def transform(self, node, results): new = node.clone() comma = False for child in new.children: if child in self.SEPS: prefix = child.prefix if prefix.isspace() and u"\n" not in prefix: child.prefix = u"" comma = True else: if comma: prefix = child.prefix if not prefix: child.prefix = u" " comma = False return new fixes/fix_ws_comma.pyo000064400000002564147204472210011100 0ustar00 {fc@sSdZddlmZddlmZddlmZdejfdYZdS(sFixer that changes 'a ,b' into 'a, b'. This also changes '{a :b}' into '{a: b}', but does not touch other uses of colons. It does not touch other uses of whitespace. i(tpytree(ttoken(t fixer_baset FixWsCommacBsSeZeZdZejejdZejej dZ ee fZ dZ RS(sH any<(not(',') any)+ ',' ((not(',') any)+ ',')* [not(',') any]> u,u:cCs|j}t}x|jD]u}||jkrg|j}|jr^d|kr^d|_nt}q|r|j}|sd|_qnt}qW|S(Nu uu (tclonetFalsetchildrentSEPStprefixtisspacetTrue(tselftnodetresultstnewtcommatchildR((s2/usr/lib64/python2.7/lib2to3/fixes/fix_ws_comma.pyt transforms      ( t__name__t __module__R texplicittPATTERNRtLeafRtCOMMAtCOLONRR(((s2/usr/lib64/python2.7/lib2to3/fixes/fix_ws_comma.pyR s  N(t__doc__tRtpgen2RRtBaseFixR(((s2/usr/lib64/python2.7/lib2to3/fixes/fix_ws_comma.pytsfixes/fix_xrange.py000064400000005213147204472210010372 0ustar00# Copyright 2007 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Fixer that changes xrange(...) into range(...).""" # Local imports from .. import fixer_base from ..fixer_util import Name, Call, consuming_calls from .. 
import patcomp class FixXrange(fixer_base.BaseFix): BM_compatible = True PATTERN = """ power< (name='range'|name='xrange') trailer< '(' args=any ')' > rest=any* > """ def start_tree(self, tree, filename): super(FixXrange, self).start_tree(tree, filename) self.transformed_xranges = set() def finish_tree(self, tree, filename): self.transformed_xranges = None def transform(self, node, results): name = results["name"] if name.value == u"xrange": return self.transform_xrange(node, results) elif name.value == u"range": return self.transform_range(node, results) else: raise ValueError(repr(name)) def transform_xrange(self, node, results): name = results["name"] name.replace(Name(u"range", prefix=name.prefix)) # This prevents the new range call from being wrapped in a list later. self.transformed_xranges.add(id(node)) def transform_range(self, node, results): if (id(node) not in self.transformed_xranges and not self.in_special_context(node)): range_call = Call(Name(u"range"), [results["args"].clone()]) # Encase the range call in list(). list_call = Call(Name(u"list"), [range_call], prefix=node.prefix) # Put things that were after the range() call after the list call. for n in results["rest"]: list_call.append_child(n) return list_call P1 = "power< func=NAME trailer< '(' node=any ')' > any* >" p1 = patcomp.compile_pattern(P1) P2 = """for_stmt< 'for' any 'in' node=any ':' any* > | comp_for< 'for' any 'in' node=any any* > | comparison< any 'in' node=any any*> """ p2 = patcomp.compile_pattern(P2) def in_special_context(self, node): if node.parent is None: return False results = {} if (node.parent.parent is not None and self.p1.match(node.parent.parent, results) and results["node"] is node): # list(d.keys()) -> list(d.keys()), etc. return results["func"].value in consuming_calls # for ... in d.iterkeys() -> for ... in d.keys(), etc. return self.p2.match(node.parent, results) and results["node"] is node fixes/fix_xrange.pyo000064400000006017147204472210010554 0ustar00 {fc@s_dZddlmZddlmZmZmZddlmZdejfdYZ dS(s/Fixer that changes xrange(...) 
into range(...).i(t fixer_base(tNametCalltconsuming_calls(tpatcompt FixXrangecBsteZeZdZdZdZdZdZdZ dZ e j e Z dZe j eZdZRS( s power< (name='range'|name='xrange') trailer< '(' args=any ')' > rest=any* > cCs)tt|j||t|_dS(N(tsuperRt start_treetsetttransformed_xranges(tselfttreetfilename((s0/usr/lib64/python2.7/lib2to3/fixes/fix_xrange.pyRscCs d|_dS(N(tNoneR (R R R ((s0/usr/lib64/python2.7/lib2to3/fixes/fix_xrange.pyt finish_treescCs^|d}|jdkr)|j||S|jdkrH|j||Stt|dS(Ntnameuxrangeurange(tvaluettransform_xrangettransform_ranget ValueErrortrepr(R tnodetresultsR((s0/usr/lib64/python2.7/lib2to3/fixes/fix_xrange.pyt transforms  cCs@|d}|jtdd|j|jjt|dS(NRurangetprefix(treplaceRRR taddtid(R RRR((s0/usr/lib64/python2.7/lib2to3/fixes/fix_xrange.pyR$s cCst||jkr|j| rttd|djg}ttd|gd|j}x|dD]}|j|qsW|SdS(NurangetargsulistRtrest(RR tin_special_contextRRtcloneRt append_child(R RRt range_callt list_calltn((s0/usr/lib64/python2.7/lib2to3/fixes/fix_xrange.pyR*s" s3power< func=NAME trailer< '(' node=any ')' > any* >sfor_stmt< 'for' any 'in' node=any ':' any* > | comp_for< 'for' any 'in' node=any any* > | comparison< any 'in' node=any any*> cCs|jdkrtSi}|jjdk rg|jj|jj|rg|d|krg|djtkS|jj|j|o|d|kS(NRtfunc(tparentR tFalsetp1tmatchRRtp2(R RR((s0/usr/lib64/python2.7/lib2to3/fixes/fix_xrange.pyR?s(t__name__t __module__tTruet BM_compatibletPATTERNRRRRRtP1Rtcompile_patternR'tP2R)R(((s0/usr/lib64/python2.7/lib2to3/fixes/fix_xrange.pyR s    N( t__doc__tRt fixer_utilRRRRtBaseFixR(((s0/usr/lib64/python2.7/lib2to3/fixes/fix_xrange.pytsfixes/fix_xreadlines.py000064400000001262147204472210011244 0ustar00"""Fix "for x in f.xreadlines()" -> "for x in f". This fixer will also convert g(f.xreadlines) into g(f.__iter__).""" # Author: Collin Winter # Local imports from .. import fixer_base from ..fixer_util import Name class FixXreadlines(fixer_base.BaseFix): BM_compatible = True PATTERN = """ power< call=any+ trailer< '.' 'xreadlines' > trailer< '(' ')' > > | power< any+ trailer< '.' no_call='xreadlines' > > """ def transform(self, node, results): no_call = results.get("no_call") if no_call: no_call.replace(Name(u"__iter__", prefix=no_call.prefix)) else: node.replace([x.clone() for x in results["call"]]) fixes/fix_zip.py000064400000001610147204472210007705 0ustar00""" Fixer that changes zip(seq0, seq1, ...) into list(zip(seq0, seq1, ...) unless there exists a 'from future_builtins import zip' statement in the top-level namespace. We avoid the transformation if the zip() call is directly contained in iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or for V in <>:. """ # Local imports from .. import fixer_base from ..fixer_util import Name, Call, in_special_context class FixZip(fixer_base.ConditionalFix): BM_compatible = True PATTERN = """ power< 'zip' args=trailer< '(' [any] ')' > > """ skip_on = "future_builtins.zip" def transform(self, node, results): if self.should_skip(node): return if in_special_context(node): return None new = node.clone() new.prefix = u"" new = Call(Name(u"list"), [new]) new.prefix = node.prefix return new fixes/fix_zip.pyo000064400000002520147204472210010065 0ustar00 {fc@sOdZddlmZddlmZmZmZdejfdYZdS(s7 Fixer that changes zip(seq0, seq1, ...) into list(zip(seq0, seq1, ...) unless there exists a 'from future_builtins import zip' statement in the top-level namespace. We avoid the transformation if the zip() call is directly contained in iter(<>), list(<>), tuple(<>), sorted(<>), ...join(<>), or for V in <>:. 
i(t fixer_base(tNametCalltin_special_contexttFixZipcBs#eZeZdZdZdZRS(s: power< 'zip' args=trailer< '(' [any] ')' > > sfuture_builtins.zipcCs`|j|rdSt|r#dS|j}d|_ttd|g}|j|_|S(Nuulist(t should_skipRtNonetclonetprefixRR(tselftnodetresultstnew((s-/usr/lib64/python2.7/lib2to3/fixes/fix_zip.pyt transforms    (t__name__t __module__tTruet BM_compatibletPATTERNtskip_onR (((s-/usr/lib64/python2.7/lib2to3/fixes/fix_zip.pyRsN( t__doc__tRt fixer_utilRRRtConditionalFixR(((s-/usr/lib64/python2.7/lib2to3/fixes/fix_zip.pytsfixes/__init__.py000064400000000057147204472210010000 0ustar00# Dummy file to make this directory a package. fixes/fix_reduce.pyo000064400000002374147204472210010541 0ustar00 {fc@sCdZddlmZddlmZdejfdYZdS(sqFixer for reduce(). Makes sure reduce() is imported from the functools module if reduce is used in that module. i(t fixer_base(t touch_importt FixReducecBs#eZeZdZdZdZRS(tpresi power< 'reduce' trailer< '(' arglist< ( (not(argument) any ',' not(argument > cCstdd|dS(Nu functoolsureduce(R(tselftnodetresults((s0/usr/lib64/python2.7/lib2to3/fixes/fix_reduce.pyt transform"s(t__name__t __module__tTruet BM_compatibletordertPATTERNR(((s0/usr/lib64/python2.7/lib2to3/fixes/fix_reduce.pyRsN(t__doc__tlib2to3Rtlib2to3.fixer_utilRtBaseFixR(((s0/usr/lib64/python2.7/lib2to3/fixes/fix_reduce.pytsfixes/fix_renames.py000064400000004252147204472210010542 0ustar00"""Fix incompatible renames Fixes: * sys.maxint -> sys.maxsize """ # Author: Christian Heimes # based on Collin Winter's fix_import # Local imports from .. import fixer_base from ..fixer_util import Name, attr_chain MAPPING = {"sys": {"maxint" : "maxsize"}, } LOOKUP = {} def alternates(members): return "(" + "|".join(map(repr, members)) + ")" def build_pattern(): #bare = set() for module, replace in MAPPING.items(): for old_attr, new_attr in replace.items(): LOOKUP[(module, old_attr)] = new_attr #bare.add(module) #bare.add(old_attr) #yield """ # import_name< 'import' (module=%r # | dotted_as_names< any* module=%r any* >) > # """ % (module, module) yield """ import_from< 'from' module_name=%r 'import' ( attr_name=%r | import_as_name< attr_name=%r 'as' any >) > """ % (module, old_attr, old_attr) yield """ power< module_name=%r trailer< '.' attr_name=%r > any* > """ % (module, old_attr) #yield """bare_name=%s""" % alternates(bare) class FixRenames(fixer_base.BaseFix): BM_compatible = True PATTERN = "|".join(build_pattern()) order = "pre" # Pre-order tree traversal # Don't match the node if it's within another match def match(self, node): match = super(FixRenames, self).match results = match(node) if results: if any(match(obj) for obj in attr_chain(node, "parent")): return False return results return False #def start_tree(self, tree, filename): # super(FixRenames, self).start_tree(tree, filename) # self.replace = {} def transform(self, node, results): mod_name = results.get("module_name") attr_name = results.get("attr_name") #bare_name = results.get("bare_name") #import_mod = results.get("module") if mod_name and attr_name: new_attr = unicode(LOOKUP[(mod_name.value, attr_name.value)]) attr_name.replace(Name(new_attr, prefix=attr_name.prefix)) fixes/fix_renames.pyo000064400000004647147204472210010731 0ustar00 {fc@sudZddlmZddlmZmZiidd6d6ZiZdZdZ d ej fd YZ d S( s?Fix incompatible renames Fixes: * sys.maxint -> sys.maxsize i(t fixer_base(tNamet attr_chaintmaxsizetmaxinttsyscCsddjtt|dS(Nt(t|t)(tjointmaptrepr(tmembers((s1/usr/lib64/python2.7/lib2to3/fixes/fix_renames.pyt alternatessccsoxhtjD]Z\}}xK|jD]=\}}|t||f) > s^ power< module_name=%r trailer< '.' 
attr_name=%r > any* > (tMAPPINGtitemstLOOKUP(tmoduletreplacetold_attrtnew_attr((s1/usr/lib64/python2.7/lib2to3/fixes/fix_renames.pyt build_patterns  t FixRenamescBs8eZeZdjeZdZdZdZ RS(RtprecsUtt|j|}|rQtfdt|dDrMtS|StS(Nc3s|]}|VqdS(N((t.0tobj(tmatch(s1/usr/lib64/python2.7/lib2to3/fixes/fix_renames.pys 5stparent(tsuperRRtanyRtFalse(tselftnodetresults((Rs1/usr/lib64/python2.7/lib2to3/fixes/fix_renames.pyR1s %cCsi|jd}|jd}|re|rett|j|jf}|jt|d|jndS(Nt module_namet attr_nametprefix(tgettunicodeRtvalueRRR$(RR R!tmod_nameR#R((s1/usr/lib64/python2.7/lib2to3/fixes/fix_renames.pyt transform>s  ( t__name__t __module__tTruet BM_compatibleR RtPATTERNtorderRR)(((s1/usr/lib64/python2.7/lib2to3/fixes/fix_renames.pyR*s  N( t__doc__tRt fixer_utilRRRRR RtBaseFixR(((s1/usr/lib64/python2.7/lib2to3/fixes/fix_renames.pyts  fixes/fix_set_literal.py000064400000003243147204472210011416 0ustar00""" Optional fixer to transform set() calls to set literals. """ # Author: Benjamin Peterson from lib2to3 import fixer_base, pytree from lib2to3.fixer_util import token, syms class FixSetLiteral(fixer_base.BaseFix): BM_compatible = True explicit = True PATTERN = """power< 'set' trailer< '(' (atom=atom< '[' (items=listmaker< any ((',' any)* [',']) > | single=any) ']' > | atom< '(' items=testlist_gexp< any ((',' any)* [',']) > ')' > ) ')' > > """ def transform(self, node, results): single = results.get("single") if single: # Make a fake listmaker fake = pytree.Node(syms.listmaker, [single.clone()]) single.replace(fake) items = fake else: items = results["items"] # Build the contents of the literal literal = [pytree.Leaf(token.LBRACE, u"{")] literal.extend(n.clone() for n in items.children) literal.append(pytree.Leaf(token.RBRACE, u"}")) # Set the prefix of the right brace to that of the ')' or ']' literal[-1].prefix = items.next_sibling.prefix maker = pytree.Node(syms.dictsetmaker, literal) maker.prefix = node.prefix # If the original was a one tuple, we need to remove the extra comma. if len(maker.children) == 4: n = maker.children[2] n.remove() maker.children[-1].prefix = n.prefix # Finally, replace the set call with our shiny new literal. return maker fixes/fix_set_literal.pyo000064400000003724147204472210011601 0ustar00 {fc@sOdZddlmZmZddlmZmZdejfdYZdS(s: Optional fixer to transform set() calls to set literals. i(t fixer_basetpytree(ttokentsymst FixSetLiteralcBs#eZeZeZdZdZRS(sjpower< 'set' trailer< '(' (atom=atom< '[' (items=listmaker< any ((',' any)* [',']) > | single=any) ']' > | atom< '(' items=testlist_gexp< any ((',' any)* [',']) > ')' > ) ')' > > c Cs|jd}|rItjtj|jg}|j||}n |d}tjtj dg}|j d|j D|j tjtj d|jj|d_tjtj|}|j|_t|j dkr|j d}|j|j|j d_n|S( Ntsingletitemsu{css|]}|jVqdS(N(tclone(t.0tn((s5/usr/lib64/python2.7/lib2to3/fixes/fix_set_literal.pys 'su}iii(tgetRtNodeRt listmakerRtreplacetLeafRtLBRACEtextendtchildrentappendtRBRACEt next_siblingtprefixt dictsetmakertlentremove( tselftnodetresultsRtfakeRtliteraltmakerR ((s5/usr/lib64/python2.7/lib2to3/fixes/fix_set_literal.pyt transforms"      (t__name__t __module__tTruet BM_compatibletexplicittPATTERNR(((s5/usr/lib64/python2.7/lib2to3/fixes/fix_set_literal.pyR s N( t__doc__tlib2to3RRtlib2to3.fixer_utilRRtBaseFixR(((s5/usr/lib64/python2.7/lib2to3/fixes/fix_set_literal.pytsfixes/fix_standarderror.py000064400000000702147204472210011756 0ustar00# Copyright 2007 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Fixer for StandardError -> Exception.""" # Local imports from .. 
import fixer_base from ..fixer_util import Name class FixStandarderror(fixer_base.BaseFix): BM_compatible = True PATTERN = """ 'StandardError' """ def transform(self, node, results): return Name(u"Exception", prefix=node.prefix) fixes/fix_standarderror.pyo000064400000001543147204472210012141 0ustar00 {fc@sCdZddlmZddlmZdejfdYZdS(s%Fixer for StandardError -> Exception.i(t fixer_base(tNametFixStandarderrorcBseZeZdZdZRS(s- 'StandardError' cCstdd|jS(Nu Exceptiontprefix(RR(tselftnodetresults((s7/usr/lib64/python2.7/lib2to3/fixes/fix_standarderror.pyt transforms(t__name__t __module__tTruet BM_compatibletPATTERNR(((s7/usr/lib64/python2.7/lib2to3/fixes/fix_standarderror.pyR sN(t__doc__tRt fixer_utilRtBaseFixR(((s7/usr/lib64/python2.7/lib2to3/fixes/fix_standarderror.pytsfixes/fix_sys_exc.py000064400000002017147204472210010562 0ustar00"""Fixer for sys.exc_{type, value, traceback} sys.exc_type -> sys.exc_info()[0] sys.exc_value -> sys.exc_info()[1] sys.exc_traceback -> sys.exc_info()[2] """ # By Jeff Balogh and Benjamin Peterson # Local imports from .. import fixer_base from ..fixer_util import Attr, Call, Name, Number, Subscript, Node, syms class FixSysExc(fixer_base.BaseFix): # This order matches the ordering of sys.exc_info(). exc_info = [u"exc_type", u"exc_value", u"exc_traceback"] BM_compatible = True PATTERN = """ power< 'sys' trailer< dot='.' attribute=(%s) > > """ % '|'.join("'%s'" % e for e in exc_info) def transform(self, node, results): sys_attr = results["attribute"][0] index = Number(self.exc_info.index(sys_attr.value)) call = Call(Name(u"exc_info"), prefix=sys_attr.prefix) attr = Attr(Name(u"sys"), call) attr[1].children[0].prefix = results["dot"].prefix attr.append(Subscript(index)) return Node(syms.power, attr, prefix=node.prefix) fixes/fix_throw.py000064400000003062147204472210010251 0ustar00"""Fixer for generator.throw(E, V, T). g.throw(E) -> g.throw(E) g.throw(E, V) -> g.throw(E(V)) g.throw(E, V, T) -> g.throw(E(V).with_traceback(T)) g.throw("foo"[, V[, T]]) will warn about string exceptions.""" # Author: Collin Winter # Local imports from .. import pytree from ..pgen2 import token from .. import fixer_base from ..fixer_util import Name, Call, ArgList, Attr, is_tuple class FixThrow(fixer_base.BaseFix): BM_compatible = True PATTERN = """ power< any trailer< '.' 'throw' > trailer< '(' args=arglist< exc=any ',' val=any [',' tb=any] > ')' > > | power< any trailer< '.' 'throw' > trailer< '(' exc=any ')' > > """ def transform(self, node, results): syms = self.syms exc = results["exc"].clone() if exc.type is token.STRING: self.cannot_convert(node, "Python 3 does not support string exceptions") return # Leave "g.throw(E)" alone val = results.get(u"val") if val is None: return val = val.clone() if is_tuple(val): args = [c.clone() for c in val.children[1:-1]] else: val.prefix = u"" args = [val] throw_args = results["args"] if "tb" in results: tb = results["tb"].clone() tb.prefix = u"" e = Call(exc, args) with_tb = Attr(e, Name(u'with_traceback')) + [ArgList([tb])] throw_args.replace(pytree.Node(syms.power, with_tb)) else: throw_args.replace(Call(exc, args)) fixes/fix_throw.pyo000064400000003732147204472210010434 0ustar00 {fc@s{dZddlmZddlmZddlmZddlmZmZm Z m Z m Z dej fdYZ dS( sFixer for generator.throw(E, V, T). g.throw(E) -> g.throw(E) g.throw(E, V) -> g.throw(E(V)) g.throw(E, V, T) -> g.throw(E(V).with_traceback(T)) g.throw("foo"[, V[, T]]) will warn about string exceptions.i(tpytree(ttoken(t fixer_base(tNametCalltArgListtAttrtis_tupletFixThrowcBseZeZdZdZRS(s power< any trailer< '.' 
'throw' > trailer< '(' args=arglist< exc=any ',' val=any [',' tb=any] > ')' > > | power< any trailer< '.' 'throw' > trailer< '(' exc=any ')' > > c CsP|j}|dj}|jtjkr?|j|ddS|jd}|dkr^dS|j}t|rg|j dd!D]}|j^q}nd|_ |g}|d}d|kr6|dj} d| _ t ||} t | t d t| gg} |jtj|j| n|jt ||dS( Ntexcs+Python 3 does not support string exceptionsuvaliiutargsttbuwith_traceback(tsymstclonettypeRtSTRINGtcannot_converttgettNoneRtchildrentprefixRRRRtreplaceRtNodetpower( tselftnodetresultsR R tvaltcR t throw_argsR tetwith_tb((s//usr/lib64/python2.7/lib2to3/fixes/fix_throw.pyt transforms*    ,     %(t__name__t __module__tTruet BM_compatibletPATTERNR (((s//usr/lib64/python2.7/lib2to3/fixes/fix_throw.pyRsN(t__doc__tRtpgen2RRt fixer_utilRRRRRtBaseFixR(((s//usr/lib64/python2.7/lib2to3/fixes/fix_throw.pyts (pgen2/__init__.py000064400000000217147204472210007673 0ustar00# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """The pgen2 package.""" pgen2/__init__.pyc000064400000000256147204472210010041 0ustar00 {fc@s dZdS(sThe pgen2 package.N(t__doc__(((s./usr/lib64/python2.7/lib2to3/pgen2/__init__.pyttpgen2/__init__.pyo000064400000000256147204472210010055 0ustar00 {fc@s dZdS(sThe pgen2 package.N(t__doc__(((s./usr/lib64/python2.7/lib2to3/pgen2/__init__.pyttpgen2/conv.py000064400000022633147204472210007107 0ustar00# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Convert graminit.[ch] spit out by pgen to Python code. Pgen is the Python parser generator. It is useful to quickly create a parser from a grammar file in Python's grammar notation. But I don't want my parsers to be written in C (yet), so I'm translating the parsing tables to Python data structures and writing a Python parse engine. Note that the token numbers are constants determined by the standard Python tokenizer. The standard token module defines these numbers and their names (the names are not used much). The token numbers are hardcoded into the Python tokenizer and into pgen. A Python implementation of the Python tokenizer is also available, in the standard tokenize module. On the other hand, symbol numbers (representing the grammar's non-terminals) are assigned by pgen based on the actual grammar input. Note: this module is pretty much obsolete; the pgen module generates equivalent grammar tables directly from the Grammar.txt input file without having to invoke the Python pgen C program. """ # Python imports import re # Local imports from pgen2 import grammar, token class Converter(grammar.Grammar): """Grammar subclass that reads classic pgen output files. The run() method reads the tables as produced by the pgen parser generator, typically contained in two C files, graminit.h and graminit.c. The other methods are for internal use only. See the base class for more documentation. """ def run(self, graminit_h, graminit_c): """Load the grammar tables from the text files written by pgen.""" self.parse_graminit_h(graminit_h) self.parse_graminit_c(graminit_c) self.finish_off() def parse_graminit_h(self, filename): """Parse the .h file written by pgen. (Internal) This file is a sequence of #define statements defining the nonterminals of the grammar as numbers. We build two tables mapping the numbers to names and back. 
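
        A typical input line looks like

            #define single_input 256

        which makes symbol2number['single_input'] == 256 and
        number2symbol[256] == 'single_input'.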
""" try: f = open(filename) except IOError, err: print "Can't open %s: %s" % (filename, err) return False self.symbol2number = {} self.number2symbol = {} lineno = 0 for line in f: lineno += 1 mo = re.match(r"^#define\s+(\w+)\s+(\d+)$", line) if not mo and line.strip(): print "%s(%s): can't parse %s" % (filename, lineno, line.strip()) else: symbol, number = mo.groups() number = int(number) assert symbol not in self.symbol2number assert number not in self.number2symbol self.symbol2number[symbol] = number self.number2symbol[number] = symbol return True def parse_graminit_c(self, filename): """Parse the .c file written by pgen. (Internal) The file looks as follows. The first two lines are always this: #include "pgenheaders.h" #include "grammar.h" After that come four blocks: 1) one or more state definitions 2) a table defining dfas 3) a table defining labels 4) a struct defining the grammar A state definition has the following form: - one or more arc arrays, each of the form: static arc arcs__[] = { {, }, ... }; - followed by a state array, of the form: static state states_[] = { {, arcs__}, ... }; """ try: f = open(filename) except IOError, err: print "Can't open %s: %s" % (filename, err) return False # The code below essentially uses f's iterator-ness! lineno = 0 # Expect the two #include lines lineno, line = lineno+1, f.next() assert line == '#include "pgenheaders.h"\n', (lineno, line) lineno, line = lineno+1, f.next() assert line == '#include "grammar.h"\n', (lineno, line) # Parse the state definitions lineno, line = lineno+1, f.next() allarcs = {} states = [] while line.startswith("static arc "): while line.startswith("static arc "): mo = re.match(r"static arc arcs_(\d+)_(\d+)\[(\d+)\] = {$", line) assert mo, (lineno, line) n, m, k = map(int, mo.groups()) arcs = [] for _ in range(k): lineno, line = lineno+1, f.next() mo = re.match(r"\s+{(\d+), (\d+)},$", line) assert mo, (lineno, line) i, j = map(int, mo.groups()) arcs.append((i, j)) lineno, line = lineno+1, f.next() assert line == "};\n", (lineno, line) allarcs[(n, m)] = arcs lineno, line = lineno+1, f.next() mo = re.match(r"static state states_(\d+)\[(\d+)\] = {$", line) assert mo, (lineno, line) s, t = map(int, mo.groups()) assert s == len(states), (lineno, line) state = [] for _ in range(t): lineno, line = lineno+1, f.next() mo = re.match(r"\s+{(\d+), arcs_(\d+)_(\d+)},$", line) assert mo, (lineno, line) k, n, m = map(int, mo.groups()) arcs = allarcs[n, m] assert k == len(arcs), (lineno, line) state.append(arcs) states.append(state) lineno, line = lineno+1, f.next() assert line == "};\n", (lineno, line) lineno, line = lineno+1, f.next() self.states = states # Parse the dfas dfas = {} mo = re.match(r"static dfa dfas\[(\d+)\] = {$", line) assert mo, (lineno, line) ndfas = int(mo.group(1)) for i in range(ndfas): lineno, line = lineno+1, f.next() mo = re.match(r'\s+{(\d+), "(\w+)", (\d+), (\d+), states_(\d+),$', line) assert mo, (lineno, line) symbol = mo.group(2) number, x, y, z = map(int, mo.group(1, 3, 4, 5)) assert self.symbol2number[symbol] == number, (lineno, line) assert self.number2symbol[number] == symbol, (lineno, line) assert x == 0, (lineno, line) state = states[z] assert y == len(state), (lineno, line) lineno, line = lineno+1, f.next() mo = re.match(r'\s+("(?:\\\d\d\d)*")},$', line) assert mo, (lineno, line) first = {} rawbitset = eval(mo.group(1)) for i, c in enumerate(rawbitset): byte = ord(c) for j in range(8): if byte & (1<@rd||d|_[] = { {, }, ... 
}; - followed by a state array, of the form: static state states_[] = { {, arcs__}, ... }; sCan't open %s: %siis#include "pgenheaders.h" s#include "grammar.h" s static arc s)static arc arcs_(\d+)_(\d+)\[(\d+)\] = {$s\s+{(\d+), (\d+)},$s}; s'static state states_(\d+)\[(\d+)\] = {$s\s+{(\d+), arcs_(\d+)_(\d+)},$sstatic dfa dfas\[(\d+)\] = {$s0\s+{(\d+), "(\w+)", (\d+), (\d+), states_(\d+),$iiiis\s+("(?:\\\d\d\d)*")},$is!static label labels\[(\d+)\] = {$s\s+{(\d+), (0|"\w+")},$t0sgrammar _PyParser_Grammar = { s \s+(\d+),$s dfas, s\s+{(\d+), labels},$s \s+(\d+)$N(R R R tnextRt startswithRRtmapRRtrangetappendtlentstatestgroupR Rtevalt enumeratetordtdfastNonetlabelststartt StopIteration(!RRRRRRtallarcsR%Rtntmtktarcst_titjtstttstateR*tndfasRRtxtytztfirstt rawbitsettctbyteR,tnlabelsR-((s*/usr/lib64/python2.7/lib2to3/pgen2/conv.pyRTs  $$    -%% $       '!  cCsi|_i|_xjt|jD]Y\}\}}|tjkr_|dk r_||j|s pgen2/conv.pyo000064400000015575147204472210007275 0ustar00 {fc@sEdZddlZddlmZmZdejfdYZdS(sConvert graminit.[ch] spit out by pgen to Python code. Pgen is the Python parser generator. It is useful to quickly create a parser from a grammar file in Python's grammar notation. But I don't want my parsers to be written in C (yet), so I'm translating the parsing tables to Python data structures and writing a Python parse engine. Note that the token numbers are constants determined by the standard Python tokenizer. The standard token module defines these numbers and their names (the names are not used much). The token numbers are hardcoded into the Python tokenizer and into pgen. A Python implementation of the Python tokenizer is also available, in the standard tokenize module. On the other hand, symbol numbers (representing the grammar's non-terminals) are assigned by pgen based on the actual grammar input. Note: this module is pretty much obsolete; the pgen module generates equivalent grammar tables directly from the Grammar.txt input file without having to invoke the Python pgen C program. iN(tgrammarttokent ConvertercBs2eZdZdZdZdZdZRS(s2Grammar subclass that reads classic pgen output files. The run() method reads the tables as produced by the pgen parser generator, typically contained in two C files, graminit.h and graminit.c. The other methods are for internal use only. See the base class for more documentation. cCs(|j||j||jdS(s<Load the grammar tables from the text files written by pgen.N(tparse_graminit_htparse_graminit_ct finish_off(tselft graminit_ht graminit_c((s*/usr/lib64/python2.7/lib2to3/pgen2/conv.pytrun/s  c Csyt|}Wn#tk r5}d||fGHtSXi|_i|_d}x|D]}|d7}tjd|}| r|jrd|||jfGHqU|j\}}t |}||j|<||j|@rd||d|_[] = { {, }, ... }; - followed by a state array, of the form: static state states_[] = { {, arcs__}, ... }; sCan't open %s: %siis static arc s)static arc arcs_(\d+)_(\d+)\[(\d+)\] = {$s\s+{(\d+), (\d+)},$s'static state states_(\d+)\[(\d+)\] = {$s\s+{(\d+), arcs_(\d+)_(\d+)},$sstatic dfa dfas\[(\d+)\] = {$s0\s+{(\d+), "(\w+)", (\d+), (\d+), states_(\d+),$iiiis\s+("(?:\\\d\d\d)*")},$is!static label labels\[(\d+)\] = {$s\s+{(\d+), (0|"\w+")},$t0s \s+(\d+),$s\s+{(\d+), labels},$s \s+(\d+)$N(R R R tnextt startswithRRtmapRRtrangetappendtstatestgrouptevalt enumeratetordtdfastNonetlabelststartt StopIteration(!RRRRRRtallarcsR#Rtntmtktarcst_titjtstttstateR(tndfasRRtxtytztfirstt rawbitsettctbyteR*tnlabelsR+((s*/usr/lib64/python2.7/lib2to3/pgen2/conv.pyRTs      -          cCsi|_i|_xjt|jD]Y\}\}}|tjkr_|dk r_||j|s pgen2/driver.py000064400000013631147204472210007433 0ustar00# Copyright 2004-2005 Elemental Security, Inc. 
All Rights Reserved. # Licensed to PSF under a Contributor Agreement. # Modifications: # Copyright 2006 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Parser driver. This provides a high-level interface to parse a file into a syntax tree. """ __author__ = "Guido van Rossum " __all__ = ["Driver", "load_grammar"] # Python imports import codecs import os import logging import pkgutil import StringIO import sys # Pgen imports from . import grammar, parse, token, tokenize, pgen class Driver(object): def __init__(self, grammar, convert=None, logger=None): self.grammar = grammar if logger is None: logger = logging.getLogger() self.logger = logger self.convert = convert def parse_tokens(self, tokens, debug=False): """Parse a series of tokens and return the syntax tree.""" # XXX Move the prefix computation into a wrapper around tokenize. p = parse.Parser(self.grammar, self.convert) p.setup() lineno = 1 column = 0 type = value = start = end = line_text = None prefix = u"" for quintuple in tokens: type, value, start, end, line_text = quintuple if start != (lineno, column): assert (lineno, column) <= start, ((lineno, column), start) s_lineno, s_column = start if lineno < s_lineno: prefix += "\n" * (s_lineno - lineno) lineno = s_lineno column = 0 if column < s_column: prefix += line_text[column:s_column] column = s_column if type in (tokenize.COMMENT, tokenize.NL): prefix += value lineno, column = end if value.endswith("\n"): lineno += 1 column = 0 continue if type == token.OP: type = grammar.opmap[value] if debug: self.logger.debug("%s %r (prefix=%r)", token.tok_name[type], value, prefix) if p.addtoken(type, value, (prefix, start)): if debug: self.logger.debug("Stop.") break prefix = "" lineno, column = end if value.endswith("\n"): lineno += 1 column = 0 else: # We never broke out -- EOF is too soon (how can this happen???) 
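            # (This is the for-loop's "else" clause: it runs only if the token
            # stream was exhausted without addtoken() ever reporting a complete
            # parse, i.e. the input ended in the middle of a construct.)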
raise parse.ParseError("incomplete input", type, value, (prefix, start)) return p.rootnode def parse_stream_raw(self, stream, debug=False): """Parse a stream and return the syntax tree.""" tokens = tokenize.generate_tokens(stream.readline) return self.parse_tokens(tokens, debug) def parse_stream(self, stream, debug=False): """Parse a stream and return the syntax tree.""" return self.parse_stream_raw(stream, debug) def parse_file(self, filename, encoding=None, debug=False): """Parse a file and return the syntax tree.""" stream = codecs.open(filename, "r", encoding) try: return self.parse_stream(stream, debug) finally: stream.close() def parse_string(self, text, debug=False): """Parse a string and return the syntax tree.""" tokens = tokenize.generate_tokens(StringIO.StringIO(text).readline) return self.parse_tokens(tokens, debug) def _generate_pickle_name(gt): head, tail = os.path.splitext(gt) if tail == ".txt": tail = "" return head + tail + ".".join(map(str, sys.version_info)) + ".pickle" def load_grammar(gt="Grammar.txt", gp=None, save=True, force=False, logger=None): """Load the grammar (maybe from a pickle).""" if logger is None: logger = logging.getLogger() gp = _generate_pickle_name(gt) if gp is None else gp if force or not _newer(gp, gt): logger.info("Generating grammar tables from %s", gt) g = pgen.generate_grammar(gt) if save: logger.info("Writing grammar tables to %s", gp) try: g.dump(gp) except IOError as e: logger.info("Writing failed: %s", e) else: g = grammar.Grammar() g.load(gp) return g def _newer(a, b): """Inquire whether file a was written since file b.""" if not os.path.exists(a): return False if not os.path.exists(b): return True return os.path.getmtime(a) >= os.path.getmtime(b) def load_packaged_grammar(package, grammar_source): """Normally, loads a pickled grammar by doing pkgutil.get_data(package, pickled_grammar) where *pickled_grammar* is computed from *grammar_source* by adding the Python version and using a ``.pickle`` extension. However, if *grammar_source* is an extant file, load_grammar(grammar_source) is called instead. This facilitates using a packaged grammar file when needed but preserves load_grammar's automatic regeneration behavior when possible. """ if os.path.isfile(grammar_source): return load_grammar(grammar_source) pickled_name = _generate_pickle_name(os.path.basename(grammar_source)) data = pkgutil.get_data(package, pickled_name) g = grammar.Grammar() g.loads(data) return g def main(*args): """Main program, when run as a script: produce grammar pickle files. Calls load_grammar for each argument, a path to a grammar text file. """ if not args: args = sys.argv[1:] logging.basicConfig(level=logging.INFO, stream=sys.stdout, format='%(message)s') for gt in args: load_grammar(gt, save=True, force=True) return True if __name__ == "__main__": sys.exit(int(not main())) pgen2/driver.pyc000064400000014477147204472210007607 0ustar00 {fc@sdZdZddgZddlZddlZddlZddlZddlZddlZddl m Z m Z m Z m Z mZdefdYZd Zd deedd Zd Zd ZdZedkrejee ndS(sZParser driver. This provides a high-level interface to parse a file into a syntax tree. 
pgen2/grammar.py

# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""This module defines the data structures used to represent a grammar.

These are a bit arcane because they are derived from the data
structures used by Python's 'pgen' parser generator.

There's also a table here mapping operators to their names in the
token module; the Python tokenize module reports all operators as the
fallback token code OP, but the parser needs the actual token code.

"""

# Python imports
import collections
import pickle

# Local imports
from . import token, tokenize


class Grammar(object):
    """Pgen parsing tables conversion class.

    Once initialized, this class supplies the grammar tables for the
    parsing engine implemented by parse.py.  The parsing engine
    accesses the instance variables directly.  The class here does not
    provide initialization of the tables; several subclasses exist to
    do this (see the conv and pgen modules).

    The load() method reads the tables from a pickle file, which is
    much faster than the other ways offered by subclasses.  The pickle
    file is written by calling dump() (after loading the grammar
    tables using a subclass).  The report() method prints a readable
    representation of the tables to stdout, for debugging.

    The instance variables are as follows:

    symbol2number -- a dict mapping symbol names to numbers.  Symbol
                     numbers are always 256 or higher, to distinguish
                     them from token numbers, which are between 0 and
                     255 (inclusive).

    number2symbol -- a dict mapping numbers to symbol names;
                     these two are each other's inverse.

    states        -- a list of DFAs, where each DFA is a list of
                     states, each state is a list of arcs, and each
                     arc is a (i, j) pair where i is a label and j is
                     a state number.  The DFA number is the index into
                     this list.  (This name is slightly confusing.)
                     Final states are represented by a special arc of
                     the form (0, j) where j is its own state number.

    dfas          -- a dict mapping symbol numbers to (DFA, first)
                     pairs, where DFA is an item from the states list
                     above, and first is a set of tokens that can
                     begin this grammar rule (represented by a dict
                     whose values are always 1).

    labels        -- a list of (x, y) pairs where x is either a token
                     number or a symbol number, and y is either None
                     or a string; the strings are keywords.  The label
                     number is the index in this list; label numbers
                     are used to mark state transitions (arcs) in the
                     DFAs.

    start         -- the number of the grammar's start symbol.

    keywords      -- a dict mapping keyword strings to arc labels.

    tokens        -- a dict mapping token numbers to arc labels.

    """

    def __init__(self):
        self.symbol2number = {}
        self.number2symbol = {}
        self.states = []
        self.dfas = {}
        self.labels = [(0, "EMPTY")]
        self.keywords = {}
        self.tokens = {}
        self.symbol2label = {}
        self.start = 256

    def dump(self, filename):
        """Dump the grammar tables to a pickle file.

        dump() recursively changes all dict to OrderedDict, so the pickled file
        is not exactly the same as what was passed in to dump().
        load() uses the pickled file to create the tables, but only changes
        OrderedDict to dict at the top level; it does not recursively change
        OrderedDict to dict.  So, the loaded tables are different from the
        original tables that were passed to load() in that some of the
        OrderedDict (from the pickled file) are not changed back to dict.
        For parsing, this has no effect on performance because OrderedDict
        uses dict's __getitem__ with nothing in between.

        """
        with open(filename, "wb") as f:
            d = _make_deterministic(self.__dict__)
            pickle.dump(d, f, 2)

    def load(self, filename):
        """Load the grammar tables from a pickle file."""
        f = open(filename, "rb")
        d = pickle.load(f)
        f.close()
        self.__dict__.update(d)

    def loads(self, pkl):
        """Load the grammar tables from a pickle bytes object."""
        self.__dict__.update(pickle.loads(pkl))

    def copy(self):
        """
        Copy the grammar.
        """
        new = self.__class__()
        for dict_attr in ("symbol2number", "number2symbol", "dfas", "keywords",
                          "tokens", "symbol2label"):
            setattr(new, dict_attr, getattr(self, dict_attr).copy())
        new.labels = self.labels[:]
        new.states = self.states[:]
        new.start = self.start
        return new

    def report(self):
        """Dump the grammar tables to standard output, for debugging."""
        from pprint import pprint
        print "s2n"
        pprint(self.symbol2number)
        print "n2s"
        pprint(self.number2symbol)
        print "states"
        pprint(self.states)
        print "dfas"
        pprint(self.dfas)
        print "labels"
        pprint(self.labels)
        print "start", self.start


def _make_deterministic(top):
    if isinstance(top, dict):
        return collections.OrderedDict(
            sorted(((k, _make_deterministic(v)) for k, v in top.iteritems())))
    if isinstance(top, list):
        return [_make_deterministic(e) for e in top]
    if isinstance(top, tuple):
        return tuple(_make_deterministic(e) for e in top)
    return top


# Map from operator to number (since tokenize doesn't do this)

opmap_raw = """
( LPAR
) RPAR
[ LSQB
] RSQB
: COLON
, COMMA
; SEMI
+ PLUS
- MINUS
* STAR
/ SLASH
| VBAR
& AMPER
< LESS
> GREATER
= EQUAL
. DOT
% PERCENT
` BACKQUOTE
{ LBRACE
} RBRACE
@ AT
@= ATEQUAL
== EQEQUAL
!= NOTEQUAL
<> NOTEQUAL
<= LESSEQUAL
>= GREATEREQUAL
~ TILDE
^ CIRCUMFLEX
<< LEFTSHIFT
>> RIGHTSHIFT
** DOUBLESTAR
+= PLUSEQUAL
-= MINEQUAL
*= STAREQUAL
/= SLASHEQUAL
%= PERCENTEQUAL
&= AMPEREQUAL
|= VBAREQUAL
^= CIRCUMFLEXEQUAL
<<= LEFTSHIFTEQUAL
>>= RIGHTSHIFTEQUAL
**= DOUBLESTAREQUAL
// DOUBLESLASH
//= DOUBLESLASHEQUAL
-> RARROW
"""

opmap = {}
for line in opmap_raw.splitlines():
    if line:
        op, name = line.split()
        opmap[op] = getattr(token, name)
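As an illustrative sketch of what these tables look like in practice (the concrete numbers depend on the grammar file and Python version; pygram comes from the enclosing lib2to3 package):

    # Illustrative sketch: poke at the tables of the bundled Python grammar.
    from lib2to3 import pygram
    from lib2to3.pgen2 import grammar, token

    g = pygram.python_grammar
    print g.number2symbol[g.start]          # name of the start symbol
    print g.symbol2number['file_input']     # its number, always >= 256
    print g.labels[0]                       # (0, 'EMPTY'), as initialized above
    print grammar.opmap['+'] == token.PLUS  # True: the OP refinement table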
twbiN(topent_make_deterministict__dict__tpickletdump(R tfilenametftd((s-/usr/lib64/python2.7/lib2to3/pgen2/grammar.pyRXs cCs<t|d}tj|}|j|jj|dS(s+Load the grammar tables from a pickle file.trbN(RRtloadtcloseRtupdate(R RRR((s-/usr/lib64/python2.7/lib2to3/pgen2/grammar.pyRis cCs|jjtj|dS(s3Load the grammar tables from a pickle bytes object.N(RRRtloads(R tpkl((s-/usr/lib64/python2.7/lib2to3/pgen2/grammar.pyRpscCsf|j}x-dD]%}t||t||jqW|j|_|j|_|j|_|S(s# Copy the grammar. RRRR R R (RRRR R R (t __class__tsetattrtgetattrtcopyRRR (R tnewt dict_attr((s-/usr/lib64/python2.7/lib2to3/pgen2/grammar.pyR!ts  #   cCszddlm}dGH||jdGH||jdGH||jdGH||jdGH||jdG|jGHd S( s:Dump the grammar tables to standard output, for debugging.i(tpprintts2ntn2sRRRR N(R$RRRRRR (R R$((s-/usr/lib64/python2.7/lib2to3/pgen2/grammar.pytreports     ( t__name__t __module__t__doc__RRRRR!R'(((s-/usr/lib64/python2.7/lib2to3/pgen2/grammar.pyRs4    cCst|tr2tjtd|jDSt|tr^g|D]}t|^qHSt|trtd|DS|S(Ncss'|]\}}|t|fVqdS(N(R(t.0tktv((s-/usr/lib64/python2.7/lib2to3/pgen2/grammar.pys scss|]}t|VqdS(N(R(R+te((s-/usr/lib64/python2.7/lib2to3/pgen2/grammar.pys s( t isinstancetdictt collectionst OrderedDicttsortedt iteritemstlistRttuple(ttopR.((s-/usr/lib64/python2.7/lib2to3/pgen2/grammar.pyRss ( LPAR ) RPAR [ LSQB ] RSQB : COLON , COMMA ; SEMI + PLUS - MINUS * STAR / SLASH | VBAR & AMPER < LESS > GREATER = EQUAL . DOT % PERCENT ` BACKQUOTE { LBRACE } RBRACE @ AT @= ATEQUAL == EQEQUAL != NOTEQUAL <> NOTEQUAL <= LESSEQUAL >= GREATEREQUAL ~ TILDE ^ CIRCUMFLEX << LEFTSHIFT >> RIGHTSHIFT ** DOUBLESTAR += PLUSEQUAL -= MINEQUAL *= STAREQUAL /= SLASHEQUAL %= PERCENTEQUAL &= AMPEREQUAL |= VBAREQUAL ^= CIRCUMFLEXEQUAL <<= LEFTSHIFTEQUAL >>= RIGHTSHIFTEQUAL **= DOUBLESTAREQUAL // DOUBLESLASH //= DOUBLESLASHEQUAL -> RARROW (R*R1RtRRtobjectRRt opmap_rawtopmapt splitlinestlinetsplittoptnameR (((s-/usr/lib64/python2.7/lib2to3/pgen2/grammar.pyt s  z =pgen2/grammar.pyo000064400000017004147204472210007743 0ustar00 {fc@sdZddlZddlZddlmZmZdefdYZdZdZ iZ xBe j D]4Z e rle j \ZZeeee et|d)}t|j}tj||dWdQXdS(sDump the grammar tables to a pickle file. dump() recursively changes all dict to OrderedDict, so the pickled file is not exactly the same as what was passed in to dump(). load() uses the pickled file to create the tables, but only changes OrderedDict to dict at the top level; it does not recursively change OrderedDict to dict. So, the loaded tables are different from the original tables that were passed to load() in that some of the OrderedDict (from the pickled file) are not changed back to dict. For parsing, this has no effect on performance because OrderedDict uses dict's __getitem__ with nothing in between. twbiN(topent_make_deterministict__dict__tpickletdump(R tfilenametftd((s-/usr/lib64/python2.7/lib2to3/pgen2/grammar.pyRXs cCs<t|d}tj|}|j|jj|dS(s+Load the grammar tables from a pickle file.trbN(RRtloadtcloseRtupdate(R RRR((s-/usr/lib64/python2.7/lib2to3/pgen2/grammar.pyRis cCs|jjtj|dS(s3Load the grammar tables from a pickle bytes object.N(RRRtloads(R tpkl((s-/usr/lib64/python2.7/lib2to3/pgen2/grammar.pyRpscCsf|j}x-dD]%}t||t||jqW|j|_|j|_|j|_|S(s# Copy the grammar. 
pgen2/literals.py

# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Safely evaluate Python string literals without using eval()."""

import re

simple_escapes = {"a": "\a",
                  "b": "\b",
                  "f": "\f",
                  "n": "\n",
                  "r": "\r",
                  "t": "\t",
                  "v": "\v",
                  "'": "'",
                  '"': '"',
                  "\\": "\\"}

def escape(m):
    all, tail = m.group(0, 1)
    assert all.startswith("\\")
    esc = simple_escapes.get(tail)
    if esc is not None:
        return esc
    if tail.startswith("x"):
        hexes = tail[1:]
        if len(hexes) < 2:
            raise ValueError("invalid hex string escape ('\\%s')" % tail)
        try:
            i = int(hexes, 16)
        except ValueError:
            raise ValueError("invalid hex string escape ('\\%s')" % tail)
    else:
        try:
            i = int(tail, 8)
        except ValueError:
            raise ValueError("invalid octal string escape ('\\%s')" % tail)
    return chr(i)

def evalString(s):
    assert s.startswith("'") or s.startswith('"'), repr(s[:1])
    q = s[0]
    if s[:3] == q*3:
        q = q*3
    assert s.endswith(q), repr(s[-len(q):])
    assert len(s) >= 2*len(q)
    s = s[len(q):-len(q)]
    return re.sub(r"\\(\'|\"|\\|[abfnrtv]|x.{0,2}|[0-7]{1,3})", escape, s)

def test():
    for i in range(256):
        c = chr(i)
        s = repr(c)
        e = evalString(s)
        if e != c:
            print i, c, s, e


if __name__ == "__main__":
    test()
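A few worked examples of evalString(), with results implied by the escape table above:

    # Illustrative examples for evalString().
    from lib2to3.pgen2.literals import evalString

    print repr(evalString(r"'a\nb'"))       # 'a\nb'   -- \n becomes a newline
    print repr(evalString(r"'\x41'"))       # 'A'      -- hex escape
    print repr(evalString('"""quoted"""'))  # 'quoted' -- triple quotes stripped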
pgen2/parse.py

# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Parser engine for the grammar tables generated by pgen.

The grammar table must be loaded first.

See Parser/parser.c in the Python distribution for additional info on
how this parsing engine works.

"""

# Local imports
from . import token

class ParseError(Exception):
    """Exception to signal the parser is stuck."""

    def __init__(self, msg, type, value, context):
        Exception.__init__(self, "%s: type=%r, value=%r, context=%r" %
                           (msg, type, value, context))
        self.msg = msg
        self.type = type
        self.value = value
        self.context = context

class Parser(object):
    """Parser engine.

    The proper usage sequence is:

    p = Parser(grammar, [converter])  # create instance
    p.setup([start])                  # prepare for parsing
    <for each input token>:
        if p.addtoken(...):           # parse a token; may raise ParseError
            break
    root = p.rootnode                 # root of abstract syntax tree

    A Parser instance may be reused by calling setup() repeatedly.

    A Parser instance contains state pertaining to the current token
    sequence, and should not be used concurrently by different threads
    to parse separate token sequences.

    See driver.py for how to get input tokens by tokenizing a file or
    string.

    Parsing is complete when addtoken() returns True; the root of the
    abstract syntax tree can then be retrieved from the rootnode
    instance variable.  When a syntax error occurs, addtoken() raises
    the ParseError exception.  There is no error recovery; the parser
    cannot be used after a syntax error was reported (but it can be
    reinitialized by calling setup()).

    """

    def __init__(self, grammar, convert=None):
        """Constructor.

        The grammar argument is a grammar.Grammar instance; see the
        grammar module for more information.

        The parser is not ready yet for parsing; you must call the
        setup() method to get it started.

        The optional convert argument is a function mapping concrete
        syntax tree nodes to abstract syntax tree nodes.  If not
        given, no conversion is done and the syntax tree produced is
        the concrete syntax tree.  If given, it must be a function of
        two arguments, the first being the grammar (a grammar.Grammar
        instance), and the second being the concrete syntax tree node
        to be converted.  The syntax tree is converted from the bottom
        up.
        A concrete syntax tree node is a (type, value, context, nodes)
        tuple, where type is the node type (a token or symbol number),
        value is None for symbols and a string for tokens, context is
        None or an opaque value used for error reporting (typically a
        (lineno, offset) pair), and nodes is a list of children for
        symbols, and None for tokens.

        An abstract syntax tree node may be anything; this is entirely
        up to the converter function.

        """
        self.grammar = grammar
        self.convert = convert or (lambda grammar, node: node)

    def setup(self, start=None):
        """Prepare for parsing.

        This *must* be called before starting to parse.

        The optional argument is an alternative start symbol; it
        defaults to the grammar's start symbol.

        You can use a Parser instance to parse any number of programs;
        each time you call setup() the parser is reset to an initial
        state determined by the (implicit or explicit) start symbol.

        """
        if start is None:
            start = self.grammar.start
        # Each stack entry is a tuple: (dfa, state, node).
        # A node is a tuple: (type, value, context, children),
        # where children is a list of nodes or None, and context may be None.
        newnode = (start, None, None, [])
        stackentry = (self.grammar.dfas[start], 0, newnode)
        self.stack = [stackentry]
        self.rootnode = None
        self.used_names = set() # Aliased to self.rootnode.used_names in pop()

    def addtoken(self, type, value, context):
        """Add a token; return True iff this is the end of the program."""
        # Map from token to label
        ilabel = self.classify(type, value, context)
        # Loop until the token is shifted; may raise exceptions
        while True:
            dfa, state, node = self.stack[-1]
            states, first = dfa
            arcs = states[state]
            # Look for a state with this label
            for i, newstate in arcs:
                t, v = self.grammar.labels[i]
                if ilabel == i:
                    # Look it up in the list of labels
                    assert t < 256
                    # Shift a token; we're done with it
                    self.shift(type, value, newstate, context)
                    # Pop while we are in an accept-only state
                    state = newstate
                    while states[state] == [(0, state)]:
                        self.pop()
                        if not self.stack:
                            # Done parsing!
                            return True
                        dfa, state, node = self.stack[-1]
                        states, first = dfa
                    # Done with this token
                    return False
                elif t >= 256:
                    # See if it's a symbol and if we're in its first set
                    itsdfa = self.grammar.dfas[t]
                    itsstates, itsfirst = itsdfa
                    if ilabel in itsfirst:
                        # Push a symbol
                        self.push(t, self.grammar.dfas[t], newstate, context)
                        break # To continue the outer while loop
            else:
                if (0, state) in arcs:
                    # An accepting state, pop it and try something else
                    self.pop()
                    if not self.stack:
                        # Done parsing, but another token is input
                        raise ParseError("too much input",
                                         type, value, context)
                else:
                    # No success finding a transition
                    raise ParseError("bad input", type, value, context)

    def classify(self, type, value, context):
        """Turn a token into a label.  (Internal)"""
        if type == token.NAME:
            # Keep a listing of all used names
            self.used_names.add(value)
            # Check for reserved words
            ilabel = self.grammar.keywords.get(value)
            if ilabel is not None:
                return ilabel
        ilabel = self.grammar.tokens.get(type)
        if ilabel is None:
            raise ParseError("bad token", type, value, context)
        return ilabel

    def shift(self, type, value, newstate, context):
        """Shift a token.  (Internal)"""
        dfa, state, node = self.stack[-1]
        newnode = (type, value, context, None)
        newnode = self.convert(self.grammar, newnode)
        if newnode is not None:
            node[-1].append(newnode)
        self.stack[-1] = (dfa, newstate, node)

    def push(self, type, newdfa, newstate, context):
        """Push a nonterminal.  (Internal)"""
        dfa, state, node = self.stack[-1]
        newnode = (type, None, context, [])
        self.stack[-1] = (dfa, newstate, node)
        self.stack.append((newdfa, 0, newnode))

    def pop(self):
        """Pop a nonterminal.  (Internal)"""
        popdfa, popstate, popnode = self.stack.pop()
        newnode = self.convert(self.grammar, popnode)
        if newnode is not None:
            if self.stack:
                dfa, state, node = self.stack[-1]
                node[-1].append(newnode)
            else:
                self.rootnode = newnode
                self.rootnode.used_names = self.used_names
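The setup()/addtoken() protocol from the Parser docstring, spelled out as a rough, illustrative sketch (driver.py is the real entry point and also tracks whitespace prefixes; the one-line input here is a hypothetical toy without INDENT/DEDENT handling):

    # Illustrative sketch of driving Parser by hand.
    import StringIO
    from lib2to3 import pygram
    from lib2to3.pgen2 import parse, tokenize, token, grammar

    p = parse.Parser(pygram.python_grammar)   # no converter: raw tuples
    p.setup()
    gen = tokenize.generate_tokens(StringIO.StringIO("x = 1\n").readline)
    for type, value, start, end, line in gen:
        if type in (tokenize.COMMENT, tokenize.NL):
            continue                     # the parser never sees these
        if type == token.OP:
            type = grammar.opmap[value]  # refine OP into the exact operator
        if p.addtoken(type, value, (u"", start)):
            break                        # ENDMARKER accepted; parse complete
    root = p.rootnode                    # nested (type, value, context, children)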
pgen2/pgen.py

# Copyright 2004-2005 Elemental Security, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

# Pgen imports
from . import grammar, token, tokenize

class PgenGrammar(grammar.Grammar):
    pass

class ParserGenerator(object):

    def __init__(self, filename, stream=None):
        close_stream = None
        if stream is None:
            stream = open(filename)
            close_stream = stream.close
        self.filename = filename
        self.stream = stream
        self.generator = tokenize.generate_tokens(stream.readline)
        self.gettoken() # Initialize lookahead
        self.dfas, self.startsymbol = self.parse()
        if close_stream is not None:
            close_stream()
        self.first = {} # map from symbol name to set of tokens
        self.addfirstsets()

    def make_grammar(self):
        c = PgenGrammar()
        names = self.dfas.keys()
        names.sort()
        names.remove(self.startsymbol)
        names.insert(0, self.startsymbol)
        for name in names:
            i = 256 + len(c.symbol2number)
            c.symbol2number[name] = i
            c.number2symbol[i] = name
        for name in names:
            dfa = self.dfas[name]
            states = []
            for state in dfa:
                arcs = []
                for label, next in sorted(state.arcs.iteritems()):
                    arcs.append((self.make_label(c, label), dfa.index(next)))
                if state.isfinal:
                    arcs.append((0, dfa.index(state)))
                states.append(arcs)
            c.states.append(states)
            c.dfas[c.symbol2number[name]] = (states, self.make_first(c, name))
        c.start = c.symbol2number[self.startsymbol]
        return c

    def make_first(self, c, name):
        rawfirst = self.first[name]
        first = {}
        for label in sorted(rawfirst):
            ilabel = self.make_label(c, label)
            ##assert ilabel not in first # XXX failed on <> ... !=
            first[ilabel] = 1
        return first

    def make_label(self, c, label):
        # XXX Maybe this should be a method on a subclass of converter?
        ilabel = len(c.labels)
        if label[0].isalpha():
            # Either a symbol name or a named token
            if label in c.symbol2number:
                # A symbol name (a non-terminal)
                if label in c.symbol2label:
                    return c.symbol2label[label]
                else:
                    c.labels.append((c.symbol2number[label], None))
                    c.symbol2label[label] = ilabel
                    return ilabel
            else:
                # A named token (NAME, NUMBER, STRING)
                itoken = getattr(token, label, None)
                assert isinstance(itoken, (int, long)), label
                assert itoken in token.tok_name, label
                if itoken in c.tokens:
                    return c.tokens[itoken]
                else:
                    c.labels.append((itoken, None))
                    c.tokens[itoken] = ilabel
                    return ilabel
        else:
            # Either a keyword or an operator
            assert label[0] in ('"', "'"), label
            value = eval(label)
            if value[0].isalpha():
                # A keyword
                if value in c.keywords:
                    return c.keywords[value]
                else:
                    c.labels.append((token.NAME, value))
                    c.keywords[value] = ilabel
                    return ilabel
            else:
                # An operator (any non-numeric token)
                itoken = grammar.opmap[value] # Fails if unknown token
                if itoken in c.tokens:
                    return c.tokens[itoken]
                else:
                    c.labels.append((itoken, None))
                    c.tokens[itoken] = ilabel
                    return ilabel

    def addfirstsets(self):
        names = self.dfas.keys()
        names.sort()
        for name in names:
            if name not in self.first:
                self.calcfirst(name)
            #print name, self.first[name].keys()

    def calcfirst(self, name):
        dfa = self.dfas[name]
        self.first[name] = None # dummy to detect left recursion
        state = dfa[0]
        totalset = {}
        overlapcheck = {}
        for label, next in state.arcs.iteritems():
            if label in self.dfas:
                if label in self.first:
                    fset = self.first[label]
                    if fset is None:
                        raise ValueError("recursion for rule %r" % name)
                else:
                    self.calcfirst(label)
                    fset = self.first[label]
                totalset.update(fset)
                overlapcheck[label] = fset
            else:
                totalset[label] = 1
                overlapcheck[label] = {label: 1}
        inverse = {}
        for label, itsfirst in overlapcheck.iteritems():
            for symbol in itsfirst:
                if symbol in inverse:
                    raise ValueError("rule %s is ambiguous; %s is in the"
                                     " first sets of %s as well as %s" %
                                     (name, symbol, label, inverse[symbol]))
                inverse[symbol] = label
        self.first[name] = totalset

    def parse(self):
        dfas = {}
        startsymbol = None
        # MSTART: (NEWLINE | RULE)* ENDMARKER
        while self.type != token.ENDMARKER:
            while self.type == token.NEWLINE:
                self.gettoken()
            # RULE: NAME ':' RHS NEWLINE
            name = self.expect(token.NAME)
            self.expect(token.OP, ":")
            a, z = self.parse_rhs()
            self.expect(token.NEWLINE)
            #self.dump_nfa(name, a, z)
            dfa = self.make_dfa(a, z)
            #self.dump_dfa(name, dfa)
            oldlen = len(dfa)
            self.simplify_dfa(dfa)
            newlen = len(dfa)
            dfas[name] = dfa
            #print name, oldlen, newlen
            if startsymbol is None:
                startsymbol = name
        return dfas, startsymbol

    def make_dfa(self, start, finish):
        # To turn an NFA into a DFA, we define the states of the DFA
        # to correspond to *sets* of states of the NFA.  Then do some
        # state reduction.  Let's represent sets as dicts with 1 for
        # values.
        assert isinstance(start, NFAState)
        assert isinstance(finish, NFAState)
        def closure(state):
            base = {}
            addclosure(state, base)
            return base
        def addclosure(state, base):
            assert isinstance(state, NFAState)
            if state in base:
                return
            base[state] = 1
            for label, next in state.arcs:
                if label is None:
                    addclosure(next, base)
        states = [DFAState(closure(start), finish)]
        for state in states: # NB states grows while we're iterating
            arcs = {}
            for nfastate in state.nfaset:
                for label, next in nfastate.arcs:
                    if label is not None:
                        addclosure(next, arcs.setdefault(label, {}))
            for label, nfaset in sorted(arcs.iteritems()):
                for st in states:
                    if st.nfaset == nfaset:
                        break
                else:
                    st = DFAState(nfaset, finish)
                    states.append(st)
                state.addarc(st, label)
        return states # List of DFAState instances; first one is start

    def dump_nfa(self, name, start, finish):
        print "Dump of NFA for", name
        todo = [start]
        for i, state in enumerate(todo):
            print "  State", i, state is finish and "(final)" or ""
            for label, next in state.arcs:
                if next in todo:
                    j = todo.index(next)
                else:
                    j = len(todo)
                    todo.append(next)
                if label is None:
                    print "    -> %d" % j
                else:
                    print "    %s -> %d" % (label, j)

    def dump_dfa(self, name, dfa):
        print "Dump of DFA for", name
        for i, state in enumerate(dfa):
            print "  State", i, state.isfinal and "(final)" or ""
            for label, next in sorted(state.arcs.iteritems()):
                print "    %s -> %d" % (label, dfa.index(next))

    def simplify_dfa(self, dfa):
        # This is not theoretically optimal, but works well enough.
        # Algorithm: repeatedly look for two states that have the same
        # set of arcs (same labels pointing to the same nodes) and
        # unify them, until things stop changing.

        # dfa is a list of DFAState instances
        changes = True
        while changes:
            changes = False
            for i, state_i in enumerate(dfa):
                for j in range(i+1, len(dfa)):
                    state_j = dfa[j]
                    if state_i == state_j:
                        #print "  unify", i, j
                        del dfa[j]
                        for state in dfa:
                            state.unifystate(state_j, state_i)
                        changes = True
                        break

    def parse_rhs(self):
        # RHS: ALT ('|' ALT)*
        a, z = self.parse_alt()
        if self.value != "|":
            return a, z
        else:
            aa = NFAState()
            zz = NFAState()
            aa.addarc(a)
            z.addarc(zz)
            while self.value == "|":
                self.gettoken()
                a, z = self.parse_alt()
                aa.addarc(a)
                z.addarc(zz)
            return aa, zz

    def parse_alt(self):
        # ALT: ITEM+
        a, b = self.parse_item()
        while (self.value in ("(", "[") or
               self.type in (token.NAME, token.STRING)):
            c, d = self.parse_item()
            b.addarc(c)
            b = d
        return a, b

    def parse_item(self):
        # ITEM: '[' RHS ']' | ATOM ['+' | '*']
        if self.value == "[":
            self.gettoken()
            a, z = self.parse_rhs()
            self.expect(token.OP, "]")
            a.addarc(z)
            return a, z
        else:
            a, z = self.parse_atom()
            value = self.value
            if value not in ("+", "*"):
                return a, z
            self.gettoken()
            z.addarc(a)
            if value == "+":
                return a, z
            else:
                return a, a

    def parse_atom(self):
        # ATOM: '(' RHS ')' | NAME | STRING
        if self.value == "(":
            self.gettoken()
            a, z = self.parse_rhs()
            self.expect(token.OP, ")")
            return a, z
        elif self.type in (token.NAME, token.STRING):
            a = NFAState()
            z = NFAState()
            a.addarc(z, self.value)
            self.gettoken()
            return a, z
        else:
            self.raise_error("expected (...) or NAME or STRING, got %s/%s",
                             self.type, self.value)

    def expect(self, type, value=None):
        if self.type != type or (value is not None and self.value != value):
            self.raise_error("expected %s/%s, got %s/%s",
                             type, value, self.type, self.value)
        value = self.value
        self.gettoken()
        return value

    def gettoken(self):
        tup = self.generator.next()
        while tup[0] in (tokenize.COMMENT, tokenize.NL):
            tup = self.generator.next()
        self.type, self.value, self.begin, self.end, self.line = tup
        #print token.tok_name[self.type], repr(self.value)

    def raise_error(self, msg, *args):
        if args:
            try:
                msg = msg % args
            except:
                msg = " ".join([msg] + map(str, args))
        raise SyntaxError(msg, (self.filename, self.end[0],
                                self.end[1], self.line))

class NFAState(object):

    def __init__(self):
        self.arcs = [] # list of (label, NFAState) pairs

    def addarc(self, next, label=None):
        assert label is None or isinstance(label, str)
        assert isinstance(next, NFAState)
        self.arcs.append((label, next))

class DFAState(object):

    def __init__(self, nfaset, final):
        assert isinstance(nfaset, dict)
        assert isinstance(iter(nfaset).next(), NFAState)
        assert isinstance(final, NFAState)
        self.nfaset = nfaset
        self.isfinal = final in nfaset
        self.arcs = {} # map from label to DFAState

    def addarc(self, next, label):
        assert isinstance(label, str)
        assert label not in self.arcs
        assert isinstance(next, DFAState)
        self.arcs[label] = next

    def unifystate(self, old, new):
        for label, next in self.arcs.iteritems():
            if next is old:
                self.arcs[label] = new

    def __eq__(self, other):
        # Equality test -- ignore the nfaset instance variable
        assert isinstance(other, DFAState)
        if self.isfinal != other.isfinal:
            return False
        # Can't just return self.arcs == other.arcs, because that
        # would invoke this method recursively, with cycles...
        if len(self.arcs) != len(other.arcs):
            return False
        for label, next in self.arcs.iteritems():
            if next is not other.arcs.get(label):
                return False
        return True

    __hash__ = None # For Py3 compatibility.
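# Example (an illustrative sketch of the two state classes above):
#
#     a = NFAState(); z = NFAState()
#     a.addarc(z, "NAME")           # labeled arc
#     a.addarc(z)                   # epsilon arc (label is None)
#     d = DFAState({a: 1, z: 1}, z)
#     d.isfinal                     # True -- the final NFA state is in the set
#
# make_dfa() builds exactly such DFAState objects from epsilon closures of
# sets of NFA states (the classic subset construction).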
def generate_grammar(filename="Grammar.txt"):
    p = ParserGenerator(filename)
    return p.make_grammar()
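An illustrative sketch of running the generator on a tiny, hypothetical grammar supplied as a stream (real use goes through driver.load_grammar on lib2to3's Grammar.txt; the "<toy>" filename and rule here are made up):

    # Illustrative sketch: generate tables for a one-rule toy grammar.
    import StringIO
    from lib2to3.pgen2 import pgen

    TOY_GRAMMAR = "start: NAME ('+' NAME)* NEWLINE ENDMARKER\n"
    p = pgen.ParserGenerator("<toy>", StringIO.StringIO(TOY_GRAMMAR))
    g = p.make_grammar()
    print g.symbol2number   # {'start': 256}
    print g.dfas.keys()     # [256]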
cCsPt|tst||jks*tt|ts?t||j|s H %pgen2/pgen.pyo000064400000026551147204472210007255 0ustar00 {fc@sddlmZmZmZdejfdYZdefdYZdefdYZdefd YZ d d Z d S( i(tgrammarttokenttokenizet PgenGrammarcBseZRS((t__name__t __module__(((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyRstParserGeneratorcBseZddZdZdZdZdZdZdZ dZ dZ d Z d Z d Zd Zd ZdZddZdZdZRS(cCsd}|dkr*t|}|j}n||_||_tj|j|_|j |j \|_ |_ |dk r|ni|_ |jdS(N(tNonetopentclosetfilenametstreamRtgenerate_tokenstreadlinet generatortgettokentparsetdfast startsymboltfirstt addfirstsets(tselfR R t close_stream((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyt__init__ s         c Cst}|jj}|j|j|j|jd|jx;|D]3}dt|j}||j|<||j | %ds %s -> %d(t enumerateR R$RR"R( RR+R(R\ttodoR,R.R/R0tj((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pytdump_nfas       cCsdG|GHxtt|D]f\}}dG|G|jr9dp<dGHx;t|jjD]$\}}d||j|fGHqTWqWdS(NsDump of DFA fors States(final)R_s %s -> %d(R`R%RR R!R$(RR+R-R,R.R/R0((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pytdump_dfas  "cCst}x|rt}xt|D]x\}}xit|dt|D]N}||}||krH||=x|D]}|j||qrWt}PqHqHWq"Wq WdS(Ni(tTruetFalseR`trangeRt unifystate(RR-tchangesR,tstate_iRbtstate_jR.((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyRPs     cCs|j\}}|jdkr+||fSt}t}|j||j|xI|jdkr|j|j\}}|j||j|qZW||fSdS(Nt|(t parse_altR>tNFAStateR[R(RRQRRtaatzz((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyRNs       cCsr|j\}}xS|jdks?|jtjtjfkrg|j\}}|j||}qW||fS(Nt(t[(RqRr(t parse_itemR>RIRR;tSTRINGR[(RRQtbR)td((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyRm s  cCs|jdkrU|j|j\}}|jtjd|j|||fS|j\}}|j}|dkr||fS|j|j||dkr||fS||fSdS(NRrt]t+t*(RxRy(R>RRNRLRRMR[t parse_atom(RRQRRR>((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyRss          cCs|jdkrH|j|j\}}|jtjd||fS|jtjtjfkrt }t }|j ||j|j||fS|j d|j|jdS(NRqt)s+expected (...) or NAME or STRING, got %s/%s( R>RRNRLRRMRIR;RtRnR[t raise_error(RRQRR((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyRz(s       cCsc|j|ks*|dk rL|j|krL|jd|||j|jn|j}|j|S(Nsexpected %s/%s, got %s/%s(RIRR>R|R(RRIR>((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyRL9s *   cCsi|jj}x/|dtjtjfkr@|jj}qW|\|_|_|_|_|_ dS(Ni( RR0RtCOMMENTtNLRIR>tbegintendtline(Rttup((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyRAscGss|r@y||}Wq@dj|gtt|}q@Xnt||j|jd|jd|jfdS(Nt ii(tjointmaptstrt SyntaxErrorR RR(Rtmsgtargs((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyR|Hs&N(RRRRR1R'R#RR?RRORcRdRPRNRmRsRzRLRR|(((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyR s$   .    $        RncBseZdZddZRS(cCs g|_dS(N(R (R((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyRSscCs|jj||fdS(N(R R"(RR0R/((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyR[VsN(RRRRR[(((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyRnQs RXcBs2eZdZdZdZdZdZRS(cCs%||_||k|_i|_dS(N(RYR%R (RRYtfinal((s*/usr/lib64/python2.7/lib2to3/pgen2/pgen.pyR]s cCs||j|s H %pgen2/token.py000075500000002350147204472210007257 0ustar00#! 
#!/usr/bin/python2.7

"""Token constants (from "token.h")."""

# Taken from Python (r53757) and modified to include some tokens
# originally monkeypatched in by pgen2.tokenize

#--start constants--
ENDMARKER = 0
NAME = 1
NUMBER = 2
STRING = 3
NEWLINE = 4
INDENT = 5
DEDENT = 6
LPAR = 7
RPAR = 8
LSQB = 9
RSQB = 10
COLON = 11
COMMA = 12
SEMI = 13
PLUS = 14
MINUS = 15
STAR = 16
SLASH = 17
VBAR = 18
AMPER = 19
LESS = 20
GREATER = 21
EQUAL = 22
DOT = 23
PERCENT = 24
BACKQUOTE = 25
LBRACE = 26
RBRACE = 27
EQEQUAL = 28
NOTEQUAL = 29
LESSEQUAL = 30
GREATEREQUAL = 31
TILDE = 32
CIRCUMFLEX = 33
LEFTSHIFT = 34
RIGHTSHIFT = 35
DOUBLESTAR = 36
PLUSEQUAL = 37
MINEQUAL = 38
STAREQUAL = 39
SLASHEQUAL = 40
PERCENTEQUAL = 41
AMPEREQUAL = 42
VBAREQUAL = 43
CIRCUMFLEXEQUAL = 44
LEFTSHIFTEQUAL = 45
RIGHTSHIFTEQUAL = 46
DOUBLESTAREQUAL = 47
DOUBLESLASH = 48
DOUBLESLASHEQUAL = 49
AT = 50
ATEQUAL = 51
OP = 52
COMMENT = 53
NL = 54
RARROW = 55
ERRORTOKEN = 56
N_TOKENS = 57
NT_OFFSET = 256
#--end constants--

tok_name = {}
for _name, _value in globals().items():
    if type(_value) is type(0):
        tok_name[_value] = _name


def ISTERMINAL(x):
    return x < NT_OFFSET

def ISNONTERMINAL(x):
    return x >= NT_OFFSET

def ISEOF(x):
    return x == ENDMARKER
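A few one-liners exercising the helpers above; the printed values follow directly from the constants block:

    # Illustrative examples for the token helpers.
    from lib2to3.pgen2 import token

    print token.tok_name[token.NAME]    # 'NAME'
    print token.ISTERMINAL(token.NAME)  # True  (1 < 256)
    print token.ISNONTERMINAL(257)      # True  (symbol numbers start at 256)
    print token.ISEOF(token.ENDMARKER)  # True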
pgen2/tokenize.py

# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006 Python Software Foundation.
# All rights reserved.

"""Tokenization help for Python programs.

generate_tokens(readline) is a generator that breaks a stream of
text into Python tokens.  It accepts a readline-like method which is called
repeatedly to get the next line of input (or "" for EOF).  It generates
5-tuples with these members:

    the token type (see token.py)
    the token (a string)
    the starting (row, column) indices of the token (a 2-tuple of ints)
    the ending (row, column) indices of the token (a 2-tuple of ints)
    the original line (string)

It is designed to match the working of the Python tokenizer exactly, except
that it produces COMMENT tokens for comments and gives type OP for all
operators

Older entry points
    tokenize_loop(readline, tokeneater)
    tokenize(readline, tokeneater=printtoken)
are the same, except instead of generating tokens, tokeneater is a callback
function to which the 5 fields described above are passed as 5 arguments,
each time a new token is found."""

__author__ = 'Ka-Ping Yee <ping@lfw.org>'
__credits__ = \
    'GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, Skip Montanaro'

import string, re
from codecs import BOM_UTF8, lookup
from lib2to3.pgen2.token import *

from . import token
__all__ = [x for x in dir(token) if x[0] != '_'] + ["tokenize",
           "generate_tokens", "untokenize"]
del token

try:
    bytes
except NameError:
    # Support bytes type in Python <= 2.5, so 2to3 turns itself into
    # valid Python 3 code.
    bytes = str

def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'

Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'[a-zA-Z_]\w*'

Binnumber = r'0[bB][01]*'
Hexnumber = r'0[xX][\da-fA-F]*[lL]?'
Octnumber = r'0[oO]?[0-7]*[lL]?'
Decnumber = r'[1-9]\d*[lL]?'
Intnumber = group(Binnumber, Hexnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?\d+'
Pointfloat = group(r'\d+\.\d*', r'\.\d+') + maybe(Exponent)
Expfloat = r'\d+' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'\d+[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)

# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group("[ubUB]?[rR]?'''", '[ubUB]?[rR]?"""')
# Single-line ' or " string.
String = group(r"[uU]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
               r'[uU]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*"')

# Because of leftmost-then-longest match semantics, be sure to put the
# longest operators first (e.g., if = came before ==, == would get
# recognized as two instances of =).
Operator = group(r"\*\*=?", r">>=?", r"<<=?", r"<>", r"!=",
                 r"//=?", r"->",
                 r"[+\-*/%&@|^=<>]=?",
                 r"~")

Bracket = '[][(){}]'
Special = group(r'\r?\n', r'[:;.,`@]')
Funny = group(Operator, Bracket, Special)

PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken

# First (or only) line of ' or " string.
# First (or only) line of ' or " string.
ContStr = group(r"[uUbB]?[rR]?'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
                group("'", r'\\\r?\n'),
                r'[uUbB]?[rR]?"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
                group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)

tokenprog, pseudoprog, single3prog, double3prog = map(
    re.compile, (Token, PseudoToken, Single3, Double3))
endprogs = {"'": re.compile(Single), '"': re.compile(Double),
            "'''": single3prog, '"""': double3prog,
            "r'''": single3prog, 'r"""': double3prog,
            "u'''": single3prog, 'u"""': double3prog,
            "b'''": single3prog, 'b"""': double3prog,
            "ur'''": single3prog, 'ur"""': double3prog,
            "br'''": single3prog, 'br"""': double3prog,
            "R'''": single3prog, 'R"""': double3prog,
            "U'''": single3prog, 'U"""': double3prog,
            "B'''": single3prog, 'B"""': double3prog,
            "uR'''": single3prog, 'uR"""': double3prog,
            "Ur'''": single3prog, 'Ur"""': double3prog,
            "UR'''": single3prog, 'UR"""': double3prog,
            "bR'''": single3prog, 'bR"""': double3prog,
            "Br'''": single3prog, 'Br"""': double3prog,
            "BR'''": single3prog, 'BR"""': double3prog,
            'r': None, 'R': None,
            'u': None, 'U': None,
            'b': None, 'B': None}

triple_quoted = {}
for t in ("'''", '"""',
          "r'''", 'r"""', "R'''", 'R"""',
          "u'''", 'u"""', "U'''", 'U"""',
          "b'''", 'b"""', "B'''", 'B"""',
          "ur'''", 'ur"""', "Ur'''", 'Ur"""',
          "uR'''", 'uR"""', "UR'''", 'UR"""',
          "br'''", 'br"""', "Br'''", 'Br"""',
          "bR'''", 'bR"""', "BR'''", 'BR"""',):
    triple_quoted[t] = t
single_quoted = {}
for t in ("'", '"',
          "r'", 'r"', "R'", 'R"',
          "u'", 'u"', "U'", 'U"',
          "b'", 'b"', "B'", 'B"',
          "ur'", 'ur"', "Ur'", 'Ur"',
          "uR'", 'uR"', "UR'", 'UR"',
          "br'", 'br"', "Br'", 'Br"',
          "bR'", 'bR"', "BR'", 'BR"', ):
    single_quoted[t] = t

tabsize = 8

class TokenError(Exception): pass

class StopTokenizing(Exception): pass

def printtoken(type, token, start, end, line): # for testing
    (srow, scol) = start
    (erow, ecol) = end
    print "%d,%d-%d,%d:\t%s\t%s" % \
        (srow, scol, erow, ecol, tok_name[type], repr(token))

def tokenize(readline, tokeneater=printtoken):
    """
    The tokenize() function accepts two parameters: one representing the
    input stream, and one providing an output mechanism for tokenize().

    The first parameter, readline, must be a callable object which provides
    the same interface as the readline() method of built-in file objects.
    Each call to the function should return one line of input as a string.

    The second parameter, tokeneater, must also be a callable object. It is
    called once for each token, with five arguments, corresponding to the
    tuples generated by generate_tokens().
    """
    try:
        tokenize_loop(readline, tokeneater)
    except StopTokenizing:
        pass
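# Editor's note: a hedged usage sketch for tokenize() (illustrative only,
# not part of the original module; the helper is never called at import
# time and its name is an assumption).
def _example_tokenize_callback():
    """Collect the 5-tuples tokenize() passes to its tokeneater callback."""
    from StringIO import StringIO
    collected = []
    def tokeneater(type, token, start, end, line):
        collected.append((type, token, start, end, line))
    tokenize(StringIO("x = 1\n").readline, tokeneater)
    # The first token of "x = 1" is the NAME 'x'.
    assert collected[0][0] == NAME and collected[0][1] == 'x'
    return collected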
""" try: tokenize_loop(readline, tokeneater) except StopTokenizing: pass # backwards compatible interface def tokenize_loop(readline, tokeneater): for token_info in generate_tokens(readline): tokeneater(*token_info) class Untokenizer: def __init__(self): self.tokens = [] self.prev_row = 1 self.prev_col = 0 def add_whitespace(self, start): row, col = start assert row <= self.prev_row col_offset = col - self.prev_col if col_offset: self.tokens.append(" " * col_offset) def untokenize(self, iterable): for t in iterable: if len(t) == 2: self.compat(t, iterable) break tok_type, token, start, end, line = t self.add_whitespace(start) self.tokens.append(token) self.prev_row, self.prev_col = end if tok_type in (NEWLINE, NL): self.prev_row += 1 self.prev_col = 0 return "".join(self.tokens) def compat(self, token, iterable): startline = False indents = [] toks_append = self.tokens.append toknum, tokval = token if toknum in (NAME, NUMBER): tokval += ' ' if toknum in (NEWLINE, NL): startline = True for tok in iterable: toknum, tokval = tok[:2] if toknum in (NAME, NUMBER): tokval += ' ' if toknum == INDENT: indents.append(tokval) continue elif toknum == DEDENT: indents.pop() continue elif toknum in (NEWLINE, NL): startline = True elif startline and indents: toks_append(indents[-1]) startline = False toks_append(tokval) cookie_re = re.compile(r'^[ \t\f]*#.*?coding[:=][ \t]*([-\w.]+)') blank_re = re.compile(r'^[ \t\f]*(?:[#\r\n]|$)') def _get_normal_name(orig_enc): """Imitates get_normal_name in tokenizer.c.""" # Only care about the first 12 characters. enc = orig_enc[:12].lower().replace("_", "-") if enc == "utf-8" or enc.startswith("utf-8-"): return "utf-8" if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \ enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")): return "iso-8859-1" return orig_enc def detect_encoding(readline): """ The detect_encoding() function is used to detect the encoding that should be used to decode a Python source file. It requires one argument, readline, in the same way as the tokenize() generator. It will call readline a maximum of twice, and return the encoding used (as a string) and a list of any lines (left as bytes) it has read in. It detects the encoding from the presence of a utf-8 bom or an encoding cookie as specified in pep-0263. If both a bom and a cookie are present, but disagree, a SyntaxError will be raised. If the encoding cookie is an invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, 'utf-8-sig' is returned. If no encoding is specified, then the default of 'utf-8' will be returned. 
""" bom_found = False encoding = None default = 'utf-8' def read_or_stop(): try: return readline() except StopIteration: return bytes() def find_cookie(line): try: line_string = line.decode('ascii') except UnicodeDecodeError: return None match = cookie_re.match(line_string) if not match: return None encoding = _get_normal_name(match.group(1)) try: codec = lookup(encoding) except LookupError: # This behaviour mimics the Python interpreter raise SyntaxError("unknown encoding: " + encoding) if bom_found: if codec.name != 'utf-8': # This behaviour mimics the Python interpreter raise SyntaxError('encoding problem: utf-8') encoding += '-sig' return encoding first = read_or_stop() if first.startswith(BOM_UTF8): bom_found = True first = first[3:] default = 'utf-8-sig' if not first: return default, [] encoding = find_cookie(first) if encoding: return encoding, [first] if not blank_re.match(first): return default, [first] second = read_or_stop() if not second: return default, [first] encoding = find_cookie(second) if encoding: return encoding, [first, second] return default, [first, second] def untokenize(iterable): """Transform tokens back into Python source code. Each element returned by the iterable must be a token sequence with at least two elements, a token number and token value. If only two tokens are passed, the resulting output is poor. Round-trip invariant for full input: Untokenized source will match input source exactly Round-trip invariant for limited intput: # Output text will tokenize the back to the input t1 = [tok[:2] for tok in generate_tokens(f.readline)] newcode = untokenize(t1) readline = iter(newcode.splitlines(1)).next t2 = [tok[:2] for tokin generate_tokens(readline)] assert t1 == t2 """ ut = Untokenizer() return ut.untokenize(iterable) def generate_tokens(readline): """ The generate_tokens() generator requires one argument, readline, which must be a callable object which provides the same interface as the readline() method of built-in file objects. Each call to the function should return one line of input as a string. Alternately, readline can be a callable function terminating with StopIteration: readline = open(myfile).next # Example of alternate readline The generator produces 5-tuples with these members: the token type; the token string; a 2-tuple (srow, scol) of ints specifying the row and column where the token begins in the source; a 2-tuple (erow, ecol) of ints specifying the row and column where the token ends in the source; and the line on which the token was found. The line passed is the logical line; continuation lines are included. 
""" lnum = parenlev = continued = 0 namechars, numchars = string.ascii_letters + '_', '0123456789' contstr, needcont = '', 0 contline = None indents = [0] while 1: # loop over lines in stream try: line = readline() except StopIteration: line = '' lnum = lnum + 1 pos, max = 0, len(line) if contstr: # continued string if not line: raise TokenError, ("EOF in multi-line string", strstart) endmatch = endprog.match(line) if endmatch: pos = end = endmatch.end(0) yield (STRING, contstr + line[:end], strstart, (lnum, end), contline + line) contstr, needcont = '', 0 contline = None elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n': yield (ERRORTOKEN, contstr + line, strstart, (lnum, len(line)), contline) contstr = '' contline = None continue else: contstr = contstr + line contline = contline + line continue elif parenlev == 0 and not continued: # new statement if not line: break column = 0 while pos < max: # measure leading whitespace if line[pos] == ' ': column = column + 1 elif line[pos] == '\t': column = (column//tabsize + 1)*tabsize elif line[pos] == '\f': column = 0 else: break pos = pos + 1 if pos == max: break if line[pos] in '#\r\n': # skip comments or blank lines if line[pos] == '#': comment_token = line[pos:].rstrip('\r\n') nl_pos = pos + len(comment_token) yield (COMMENT, comment_token, (lnum, pos), (lnum, pos + len(comment_token)), line) yield (NL, line[nl_pos:], (lnum, nl_pos), (lnum, len(line)), line) else: yield ((NL, COMMENT)[line[pos] == '#'], line[pos:], (lnum, pos), (lnum, len(line)), line) continue if column > indents[-1]: # count indents or dedents indents.append(column) yield (INDENT, line[:pos], (lnum, 0), (lnum, pos), line) while column < indents[-1]: if column not in indents: raise IndentationError( "unindent does not match any outer indentation level", ("", lnum, pos, line)) indents = indents[:-1] yield (DEDENT, '', (lnum, pos), (lnum, pos), line) else: # continued statement if not line: raise TokenError, ("EOF in multi-line statement", (lnum, 0)) continued = 0 while pos < max: pseudomatch = pseudoprog.match(line, pos) if pseudomatch: # scan for tokens start, end = pseudomatch.span(1) spos, epos, pos = (lnum, start), (lnum, end), end token, initial = line[start:end], line[start] if initial in numchars or \ (initial == '.' 
        while pos < max:
            pseudomatch = pseudoprog.match(line, pos)
            if pseudomatch:                                # scan for tokens
                start, end = pseudomatch.span(1)
                spos, epos, pos = (lnum, start), (lnum, end), end
                token, initial = line[start:end], line[start]

                if initial in numchars or \
                   (initial == '.' and token != '.'):      # ordinary number
                    yield (NUMBER, token, spos, epos, line)
                elif initial in '\r\n':
                    newline = NEWLINE
                    if parenlev > 0:
                        newline = NL
                    yield (newline, token, spos, epos, line)
                elif initial == '#':
                    assert not token.endswith("\n")
                    yield (COMMENT, token, spos, epos, line)
                elif token in triple_quoted:
                    endprog = endprogs[token]
                    endmatch = endprog.match(line, pos)
                    if endmatch:                           # all on one line
                        pos = endmatch.end(0)
                        token = line[start:pos]
                        yield (STRING, token, spos, (lnum, pos), line)
                    else:
                        strstart = (lnum, start)           # multiple lines
                        contstr = line[start:]
                        contline = line
                        break
                elif initial in single_quoted or \
                    token[:2] in single_quoted or \
                    token[:3] in single_quoted:
                    if token[-1] == '\n':                  # continued string
                        strstart = (lnum, start)
                        endprog = (endprogs[initial] or endprogs[token[1]] or
                                   endprogs[token[2]])
                        contstr, needcont = line[start:], 1
                        contline = line
                        break
                    else:                                  # ordinary string
                        yield (STRING, token, spos, epos, line)
                elif initial in namechars:                 # ordinary name
                    yield (NAME, token, spos, epos, line)
                elif initial == '\\':                      # continued stmt
                    # This yield is new; needed for better idempotency:
                    yield (NL, token, spos, (lnum, pos), line)
                    continued = 1
                else:
                    if initial in '([{': parenlev = parenlev + 1
                    elif initial in ')]}': parenlev = parenlev - 1
                    yield (OP, token, spos, epos, line)
            else:
                yield (ERRORTOKEN, line[pos],
                       (lnum, pos), (lnum, pos+1), line)
                pos = pos + 1

    for indent in indents[1:]:                 # pop remaining indent levels
        yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
    yield (ENDMARKER, '', (lnum, 0), (lnum, 0), '')

if __name__ == '__main__':                     # testing
    import sys
    if len(sys.argv) > 1: tokenize(open(sys.argv[1]).readline)
    else: tokenize(sys.stdin.readline)
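generate_tokens() above is the entry point the rest of lib2to3 builds on; a minimal standalone sketch of iterating it (editor's illustration, not part of any file in this archive):

    # Print (token-name, token-string) pairs for a small Python 2 snippet.
    from StringIO import StringIO
    from lib2to3.pgen2 import tokenize as pgen2_tokenize
    from lib2to3.pgen2.token import tok_name

    for tup in pgen2_tokenize.generate_tokens(StringIO("print 'hi'\n").readline):
        print tok_name[tup[0]], repr(tup[1])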
Grammar.txt000064400000015666147204472210006704 0ustar00# Grammar for 2to3. This grammar supports Python 2.x and 3.x.
# Note: Changing the grammar specified in this file will most likely
#       require corresponding changes in the parser module
#       (../Modules/parsermodule.c).  If you can't make the changes to
#       that module yourself, please co-ordinate the required changes
#       with someone who can; ask around on python-dev for help.  Fred
#       Drake <fdrake@acm.org> will probably be listening there.

# NOTE WELL: You should also follow all the steps listed in PEP 306,
# "How to Change Python's Grammar"

# Commands for Kees Blom's railroad program
#diagram:token NAME
#diagram:token NUMBER
#diagram:token STRING
#diagram:token NEWLINE
#diagram:token ENDMARKER
#diagram:token INDENT
#diagram:output\input python.bla
#diagram:token DEDENT
#diagram:output\textwidth 20.04cm\oddsidemargin  0.0cm\evensidemargin 0.0cm
#diagram:rules

# Start symbols for the grammar:
#       file_input is a module or sequence of commands read from an input file;
#       single_input is a single interactive statement;
#       eval_input is the input for the eval() and input() functions.
# NB: compound_stmt in single_input is followed by extra NEWLINE!
file_input: (NEWLINE | stmt)* ENDMARKER
single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
eval_input: testlist NEWLINE* ENDMARKER

decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
decorators: decorator+
decorated: decorators (classdef | funcdef)
funcdef: 'def' NAME parameters ['->' test] ':' suite
parameters: '(' [typedargslist] ')'
typedargslist: ((tfpdef ['=' test] ',')*
                ('*' [tname] (',' tname ['=' test])* [',' '**' tname]
                 | '**' tname)
                | tfpdef ['=' test] (',' tfpdef ['=' test])* [','])
tname: NAME [':' test]
tfpdef: tname | '(' tfplist ')'
tfplist: tfpdef (',' tfpdef)* [',']
varargslist: ((vfpdef ['=' test] ',')*
              ('*' [vname] (',' vname ['=' test])* [',' '**' vname]
               | '**' vname)
              | vfpdef ['=' test] (',' vfpdef ['=' test])* [','])
vname: NAME
vfpdef: vname | '(' vfplist ')'
vfplist: vfpdef (',' vfpdef)* [',']

stmt: simple_stmt | compound_stmt
simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
small_stmt: (expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt |
             import_stmt | global_stmt | exec_stmt | assert_stmt)
expr_stmt: testlist_star_expr (augassign (yield_expr|testlist) |
                     ('=' (yield_expr|testlist_star_expr))*)
testlist_star_expr: (test|star_expr) (',' (test|star_expr))* [',']
augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
            '<<=' | '>>=' | '**=' | '//=')
# For normal assignments, additional restrictions enforced by the interpreter
print_stmt: 'print' ( [ test (',' test)* [','] ] |
                      '>>' test [ (',' test)+ [','] ] )
del_stmt: 'del' exprlist
pass_stmt: 'pass'
flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt
break_stmt: 'break'
continue_stmt: 'continue'
return_stmt: 'return' [testlist]
yield_stmt: yield_expr
raise_stmt: 'raise' [test ['from' test | ',' test [',' test]]]
import_stmt: import_name | import_from
import_name: 'import' dotted_as_names
import_from: ('from' ('.'* dotted_name | '.'+)
              'import' ('*' | '(' import_as_names ')' | import_as_names))
import_as_name: NAME ['as' NAME]
dotted_as_name: dotted_name ['as' NAME]
import_as_names: import_as_name (',' import_as_name)* [',']
dotted_as_names: dotted_as_name (',' dotted_as_name)*
dotted_name: NAME ('.' NAME)*
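# Editor's illustration (not part of the original grammar): a concrete
# derivation under the rules above for the line "import a.b as c":
#   import_stmt -> import_name -> 'import' dotted_as_names
#   dotted_as_names -> dotted_as_name -> dotted_name 'as' NAME
#   dotted_name -> NAME '.' NAME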
global_stmt: ('global' | 'nonlocal') NAME (',' NAME)*
exec_stmt: 'exec' expr ['in' test [',' test]]
assert_stmt: 'assert' test [',' test]

compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | with_stmt | funcdef | classdef | decorated
if_stmt: 'if' test ':' suite ('elif' test ':' suite)* ['else' ':' suite]
while_stmt: 'while' test ':' suite ['else' ':' suite]
for_stmt: 'for' exprlist 'in' testlist ':' suite ['else' ':' suite]
try_stmt: ('try' ':' suite
           ((except_clause ':' suite)+ ['else' ':' suite]
            ['finally' ':' suite] |
            'finally' ':' suite))
with_stmt: 'with' with_item (',' with_item)* ':' suite
with_item: test ['as' expr]
with_var: 'as' expr
# NB compile.c makes sure that the default except clause is last
except_clause: 'except' [test [(',' | 'as') test]]
suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT

# Backward compatibility cruft to support:
# [ x for x in lambda: True, lambda: False if x() ]
# even while also allowing:
# lambda x: 5 if x else 2
# (But not a mix of the two)
testlist_safe: old_test [(',' old_test)+ [',']]
old_test: or_test | old_lambdef
old_lambdef: 'lambda' [varargslist] ':' old_test

test: or_test ['if' or_test 'else' test] | lambdef
or_test: and_test ('or' and_test)*
and_test: not_test ('and' not_test)*
not_test: 'not' not_test | comparison
comparison: expr (comp_op expr)*
comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not'
star_expr: '*' expr
expr: xor_expr ('|' xor_expr)*
xor_expr: and_expr ('^' and_expr)*
and_expr: shift_expr ('&' shift_expr)*
shift_expr: arith_expr (('<<'|'>>') arith_expr)*
arith_expr: term (('+'|'-') term)*
term: factor (('*'|'@'|'/'|'%'|'//') factor)*
factor: ('+'|'-'|'~') factor | power
power: atom trailer* ['**' factor]
atom: ('(' [yield_expr|testlist_gexp] ')' |
       '[' [listmaker] ']' |
       '{' [dictsetmaker] '}' |
       '`' testlist1 '`' |
       NAME | NUMBER | STRING+ | '.' '.' '.')
listmaker: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )
testlist_gexp: (test|star_expr) ( comp_for | (',' (test|star_expr))* [','] )
lambdef: 'lambda' [varargslist] ':' test
trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME
subscriptlist: subscript (',' subscript)* [',']
subscript: test | [test] ':' [test] [sliceop]
sliceop: ':' [test]
exprlist: (expr|star_expr) (',' (expr|star_expr))* [',']
testlist: test (',' test)* [',']
dictsetmaker: ( ((test ':' test | '**' expr)
                 (comp_for | (',' (test ':' test | '**' expr))* [','])) |
                ((test | star_expr)
                 (comp_for | (',' (test | star_expr))* [','])) )

classdef: 'class' NAME ['(' [arglist] ')'] ':' suite

arglist: argument (',' argument)* [',']

# "test '=' test" is really "keyword '=' test", but we have no such token.
# These need to be in a single rule to avoid grammar that is ambiguous
# to our LL(1) parser. Even though 'test' includes '*expr' in star_expr,
# we explicitly match '*' here, too, to give it proper precedence.
# Illegal combinations and orderings are blocked in ast.c:
# multiple (test comp_for) arguments are blocked; keyword unpackings
# that precede iterable unpackings are blocked; etc.
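# Editor's illustration (not part of the original grammar): the expression
# d.keys()[0] parses under the rules above as
#   power -> atom trailer trailer trailer
# with atom = NAME 'd', then trailer = '.' NAME, trailer = '(' ')',
# and trailer = '[' subscriptlist ']'.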
argument: ( test [comp_for] |
            test '=' test |
            '**' expr |
            star_expr )

comp_iter: comp_for | comp_if
comp_for: 'for' exprlist 'in' testlist_safe [comp_iter]
comp_if: 'if' old_test [comp_iter]

testlist1: test (',' test)*

# not used in grammar, but may appear in "node" passed from Parser to Compiler
encoding_decl: NAME

yield_expr: 'yield' [yield_arg]
yield_arg: 'from' test | testlist
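lib2to3 compiles this grammar with pgen at run time and caches the result as a pickle; a minimal sketch of parsing source through the package's public driver API (editor's illustration; the variable names are assumptions):

    from lib2to3 import pygram, pytree
    from lib2to3.pgen2 import driver

    d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
    tree = d.parse_string("def f(x):\n    return x\n")
    print str(tree)  # the tree round-trips to the original source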
Ke]r(KKe]r(KKe]r(K#Ke]r(K$Ke]r(K%Ke]r(K&Ke]r(K'KeerRrre]r(M]r(]rKiKra]r(K0KrKKre]r(K-KrKfKrKKre]rK0Kra]rKKraeh]r]r(KiKearRrre]r(M]r(]rKKra]rK1Kra]r(KNKrKKre]rK0Kra]r(K-KrKKre]rK0Kra]rKKraeh]r]r(KKearRrre]r(M]r(]rKjKra]r(KkKrKKreeh]r(]r(KKe]r(KKe]r(KKe]r(KKe]r(K Ke]r(K Ke]r(K#Ke]r(K$Ke]r(K%Ke]r(K&Ke]r(K'KeerRrre]r(M]r(]rKlKra]r(K2KrKmKrKKre]r(KlKrK7Kre]r(KhKrK7Kre]r(K2KrKKre]rKKraeh]r(]r(KKe]r(KKe]r(KKe]r(KKe]r(KKe]r(K Ke]r(K Ke]r(KKe]r(KKe]r(K#Ke]r(K$Ke]r(K%Ke]r(K&Ke]r(K'KeerRrre]r(M ]r(]r(K1KrK/Kr e]r (K-Kr KKr e]r (K1KrK/KrKKreeh]r(]r(KKe]r(KKe]r(KKe]r(KKe]r(KKe]r(K Ke]r(K Ke]r(K#Ke]r(K$Ke]r(K%Ke]r(K&Ke]r(K'KeerRrr e]r!(M!]r"(]r#(KKr$KKr%K$Kr&KnKr'e]r(KoKr)a]r*KKr+aeh]r,(]r-(KKe]r.(KKe]r/(KKe]r0(KKe]r1(K Ke]r2(K Ke]r3(K#Ke]r4(K$Ke]r5(K%Ke]r6(K&Ke]r7(K'Keer8Rr9r:e]r;(M"]r<(]r=(KpKr>KqKr?KrKr@KsKrAKtKrBe]rCKKrDaeh]rE(]rF(K Ke]rG(KKe]rH(KKe]rI(KKe]rJ(K"KeerKRrLrMe]rN(M#]rO(]rPKKrQa]rRKMKrSa]rTKNKrUa]rVKhKrWa]rXKJKrYa]rZKLKr[a]r\(KuKr]KKr^e]r_KJKr`a]raKLK rba]rcKK rdaeh]re]rf(KKeargRrhrie]rj(M$]rk(]rlKKrma]rnK%Kroa]rpKvKrqa]rr(KwKrsKJKrte]ruK0Krva]rwKLKrxa]ryKJKrza]r{KKr|aeh]r}]r~(KKearRrre]r(M%]r(]r(KKrKKre]rK%Kra]r(K-KrKKreeh]r(]r(KKe]r(KKeerRrre]r(M&]r(]rKKra]rK0Kra]rKJKra]rKLKra]r(KxKrKuKrKKre]rKJKra]rKLKra]rKKraeh]r]r(KKearRrre]r(M']r(]rK%Kra]r(KfKrKKre]rK%Kra]rKKraeh]r]r(K%KearRrre]r(M(]r(]rKyKra]r(K-KrKKre]r(KyKrKKreeh]r]r(K%KearRrre]r(M)]r(]rKKra]r(KKrKdKre]r(KKrKKrKdKre]rKKra]r(KKrKKrKzKre]rKzKra]rKKra]rK5Kraeh]r]r(KKearRrre]r(M*]r(]rKKra]rK{Kra]rKKraeh]r]r(KKearRrre]r(M+]r(]r(K|KrK}Kre]rKKraeh]r(]r(KKe]r(KKeerRrre]r(M,]r(]rKKra]r(KJKrK~Kre]rK0Kra]r KJKr a]r KKr aeh]r ]r(KKearRrre]r(M-]r(]r(K/KrK0Kre]r(K-KrK3KrKKre]r(K/KrK0KrKKre]rKKr a]r!(K-Kr"KKr#eeh]r$(]r%(KKe]r&(KKe]r'(KKe]r((KKe]r)(KKe]r*(K Ke]r+(K Ke]r,(KKe]r-(KKe]r.(K#Ke]r/(K$Ke]r0(K%Ke]r1(K&Ke]r2(K'Keer3Rr4r5e]r6(M.]r7(]r8(KKr9KKr:e]r;K*Kr<a]r=KKr>aeh]r?(]r@(KKe]rA(KKe]rB(KKe]rC(KKe]rD(K Ke]rE(K Ke]rF(KKe]rG(K#Ke]rH(K$Ke]rI(K%Ke]rJ(K&Ke]rK(K'KeerLRrMrNe]rO(M/]rP(]rQKKrRa]rS(KJKrTK~KrUe]rVKQKrWa]rXKJKrYa]rZKKr[aeh]r\]r](KKear^Rr_r`e]ra(M0]rb(]rc(KKrdKKree]rfKKrgaeh]rh(]ri(KKe]rj(KKe]rk(KKe]rl(KKe]rm(K Ke]rn(K Ke]ro(KKe]rp(KKe]rq(K#Ke]rr(K$Ke]rs(K%Ke]rt(K&Ke]ru(K'KeervRrwrxe]ry(M1]rz(]r{KKr|a]r}(KKr~KKreeh]r(]r(KKe]r(KKe]r(KKe]r(KKe]r(K Ke]r(K Ke]r(KKe]r(K#Ke]r(K$Ke]r(K%Ke]r(K&Ke]r(K'KeerRrre]r(M2]r(]rKKra]r(K5KrKKre]rKKra]rK5Kraeh]r]r(KKearRrre]r(M3]r(]rKKra]rKKraeh]r]r(KKearRrre]r(M4]r(]rKKra]r(K.KrKKrKKre]rKoKra]rKKraeh]r(]r(KKe]r(KKe]r(K Ke]r(K Ke]r(K#Ke]r(K%Ke]r(K&Ke]r(K'KeerRrre]r(M5]r(]rKKra]r(KKrK0KrKKre]rK0Kra]r(K-KrKKre]r(K-KrKKre]r(K0KrKKre]rK0Kra]r(K-KrKKre]r(K0KrKKreeh]r]r(KKearRrre]r(M6]r(]rKKra]r(K0KrKKre]r(K-KrKKrKKre]rK0Kra]rK0Kra]r(K-KrKKre]rKKraeh]r]r(KKearRrre]r(M7]r(]rKKra]r(KhKrKKre]rKKraeh]r]r(KKearRrr e]r (M8]r (]r KKr a]r(KKrKKrKKreeh]r(]r(KKe]r(KKe]r(KKe]r(KKe]r(K Ke]r(K Ke]r(K#Ke]r(K$Ke]r(K%Ke]r(K&Ke]r(K'KeerRrr e]r!(M9]r"(]r#KKr$a]r%(KKr&KKr'e]r((KKr)KKr*e]r+KKr,aeh]r-(]r.(KKe]r/(KKe]r0(KKe]r1(KKe]r2(KKe]r3(K Ke]r4(K Ke]r5(K Ke]r6(K Ke]r7(KKe]r8(KKe]r9(KKe]r:(KKe]r;(KKe]r<(KKe]r=(KKe]r>(KKe]r?(KKe]r@(KKe]rA(KKe]rB(KKe]rC(KKe]rD(K"Ke]rE(K#Ke]rF(K$Ke]rG(K%Ke]rH(K&Ke]rI(K'KeerJRrKrLe]rM(M:]rN(]rO(KKrPKKrQKKrRe]rSKKrTa]rUKKrVaeh]rW(]rX(KKe]rY(KKe]rZ(KKe]r[(KKe]r\(KKe]r](KKe]r^(K Ke]r_(K Ke]r`(K Ke]ra(K Ke]rb(K Ke]rc(KKe]rd(KKe]re(KKe]rf(KKe]rg(KKe]rh(KKe]ri(KKe]rj(KKe]rk(KKe]rl(KKe]rm(KKe]rn(KKe]ro(KKe]rp(KKe]rq(KKe]rr(KKe]rs(KKe]rt(KKe]ru(K 
Ke]rv(K!Ke]rw(K"Ke]rx(K#Ke]ry(K$Ke]rz(K%Ke]r{(K&Ke]r|(K'Keer}Rr~re]r(M;]r(]rKJKra]r(K0KrKKre]rKKraeh]r]r(KJKearRrre]r(M<]r(]r(KKrKKrKKrKKrKKrKKrKKrKKrKKre]rKKraeh]r(]r(KKe]r(KKe]r(KKe]r(KKe]r(KKe]r(K Ke]r(K Ke]r(K Ke]r(K Ke]r(KKe]r(KKe]r(KKe]r(KKe]r(KKe]r(KKe]r(KKe]r(KKe]r(KKe]r(KKe]r(KKe]r(KKe]r(KKe]r(K"Ke]r(K#Ke]r(K$Ke]r(K%Ke]r(K&Ke]r(K'KeerRrre]r(M=]r(]rKKra]rK1Kra]rKKraeh]r]r(KKearRrre]r(M>]r(]r(KKrKKre]rKKraeh]r(]r(KKe]r(KKe]r(KKe]r(KKe]r(KKe]r(K Ke]r(K Ke]r(K Ke]r(K Ke]r(K Ke]r(KKe]r(KKe]r(KKe]r(KKe]r(KKe]r(KKe]r(KKe]r(KKe]r(KKe]r(KKe]r(KKe]r(KKe]r(KKe]r(KKe]r(KKe]r(KKe]r(KKe]r(KKe]r(K Ke]r(K!Ke]r(K"Ke]r(K#Ke]r(K$Ke]r(K%Ke]r(K&Ke]r(K'KeerRrre]r(M?]r(]r(KJKrK0Kre]r(KKrK0KrKKre]r(KJKrKKre]rKKra]r(KKrKKreeh]r (]r (KKe]r (KKe]r (KKe]r (KKe]r(K Ke]r(K Ke]r(KKe]r(KKe]r(K#Ke]r(K$Ke]r(K%Ke]r(K&Ke]r(K'Ke]r(KJKeerRrre]r(M@]r(]rKKra]r(K-Kr KKr!e]r"(KKr#KKr$eeh]r%(]r&(KKe]r'(KKe]r((KKe]r)(KKe]r*(K Ke]r+(K Ke]r,(KKe]r-(KKe]r.(K#Ke]r/(K$Ke]r0(K%Ke]r1(K&Ke]r2(K'Ke]r3(KJKeer4Rr5r6e]r7(MA]r8(]r9(KKr:KKr;e]r<KKr=a]r>KKr?a]r@KKrAa]rB(KKrCKKrDeeh]rE(]rF(KKe]rG(KKe]rH(KKe]rI(KKe]rJ(KKe]rK(KKe]rL(K Ke]rM(K Ke]rN(K Ke]rO(K Ke]rP(KKe]rQ(KKe]rR(KKe]rS(KKe]rT(KKe]rU(KKe]rV(KKe]rW(KKe]rX(KKe]rY(KKe]rZ(KKe]r[(KKe]r\(KKe]r](K"Ke]r^(K#Ke]r_(K$Ke]r`(K%Ke]ra(K&Ke]rb(K'KeercRrdree]rf(MB]rg(]rhKoKria]rj(KKrkKKrlKKrmKKrnK KroKKrpeeh]rq(]rr(KKe]rs(KKe]rt(KKe]ru(KKe]rv(K Ke]rw(K Ke]rx(K#Ke]ry(K$Ke]rz(K%Ke]r{(K&Ke]r|(K'Keer}Rr~re]r(MC]r(]r(KKrKKre]rKKra]r(KKrKKre]rKKra]rKuKra]rK0Kraeh]r(]r(KKe]r(KKe]r(KKe]r(KKe]r(K Ke]r(K Ke]r(KKe]r(KKe]r(K#Ke]r(K$Ke]r(K%Ke]r(K&Ke]r(K'KeerRrre]r(MD]r(]rK0Kra]r(K-KrKKre]r(K0KrKKreeh]r(]r(KKe]r(KKe]r(KKe]r(KKe]r(K Ke]r(K Ke]r(KKe]r(KKe]r(K#Ke]r(K$Ke]r(K%Ke]r(K&Ke]r(K'KeerRrre]r(ME]r(]rK0Kra]r(K-KrKKreeh]r(]r(KKe]r(KKe]r(KKe]r(KKe]r(K Ke]r(K Ke]r(KKe]r(KKe]r(K#Ke]r(K$Ke]r(K%Ke]r(K&Ke]r(K'KeerRrre]r(MF]r(]r(K/KrK0Kre]r(K-KrK3KrKKre]r(K/KrK0KrKKre]rKKra]r(K-KrKKreeh]r(]r(KKe]r(KKe]r(KKe]r(KKe]r(KKe]r(K Ke]r(K Ke]r(KKe]r(KKe]r(K#Ke]r(K$Ke]r(K%Ke]r(K&Ke]r(K'KeerRrre]r(MG]r(]rKQKra]r(K-KrKKre]rKQKra]r(K-KrKKre]r(KQKrKKreeh]r(]r(KKe]r (KKe]r (KKe]r (KKe]r (K Ke]r (K Ke]r(KKe]r(KKe]r(K#Ke]r(K$Ke]r(K%Ke]r(K&Ke]r(K'KeerRrre]r(MH]r(]r(K/KrK0Kre]r(K-KrKKre]r (K/Kr!K0Kr"KKr#eeh]r$(]r%(KKe]r&(KKe]r'(KKe]r((KKe]r)(KKe]r*(K Ke]r+(K Ke]r,(KKe]r-(KKe]r.(K#Ke]r/(K$Ke]r0(K%Ke]r1(K&Ke]r2(K'Keer3Rr4r5e]r6(MI]r7(]r8(KKr9KKr:e]r;KKr<a]r=KKr>a]r?K5Kr@aeh]rA(]rB(KKe]rC(K%KeerDRrErFe]rG(MJ]rH(]rIKKrJa]rK(K-KrLKKrMe]rN(KKrOKKrPeeh]rQ(]rR(KKe]rS(K%KeerTRrUrVe]rW(MK]rX(]rYK%KrZa]r[(KJKr\KKr]e]r^K0Kr_a]r`KKraaeh]rb]rc(K%KeardRrerfe]rg(ML]rh(]ri(KKrjKKrkK Krle]rm(K5KrnKKKroe]rpK%Krqa]rrKKrsa]rtKKrua]rvK5Krwa]rxK8Kryaeh]rz(]r{(KKe]r|(KKe]r}(K Keer~Rrre]r(MM]r(]rKKra]rKJKra]rKLKra]r(KKrKKre]rKJKra]rKJKra]rKLKra]rKLK ra]rKKra]r(KuK rKKrKKrKK re]rKJK ra]rKLK ra]r(KKrKK reeh]r]r(KKearRrre]r(MN]r(]r(KKrK.KrKKre]r(K-KrKKrKKre]rKKra]r(K-KrK2KrKKre]r(K.KrKK re]r(K-KrKKre]rKKra]r(KKrK.KrKKrKKre]rK0K ra]r(K-KrK2K rKK re]r(K-KrKK re]rK0Kraeh]r(]r(KKe]r(KKe]r(K%Ke]r(K.KeerRrre]r(MO]r(]r(KKrK.KrKKre]r(K-KrKKrKKre]rKKra]r(K-KrK2KrKKre]r(K.KrKK re]r(K-KrKKre]rKKra]r(KKrK.KrKKrKKre]rK0K ra]r(K-KrK2K rKK re]r(K-KrKK re]rK0Kraeh]r(]r(KKe]r(KKe]r(K%Ke]r(K.KeerRrre]r(MP]r(]r (KKr KKr e]r KKr a]rKKra]rK5Kraeh]r(]r(KKe]r(K%KeerRrre]r(MQ]r(]rKKra]r(K-KrKKre]r(KKr KKr!eeh]r"(]r#(KKe]r$(K%Keer%Rr&r'e]r((MR]r)(]r*K%Kr+a]r,KKr-aeh]r.]r/(K%Kear0Rr1r2e]r3(MS]r4(]r5K Kr6a]r7K0Kr8a]r9KJKr:a]r;KLKr<a]r=(KuKr>KKr?e]r@KJKrAa]rBKLKrCa]rDKKrEaeh]rF]rG(K KearHRrIrJe]rK(MT]rL(]rMK0KrNa]rO(KfKrPKKrQe]rRK1KrSa]rTKKrUaeh]rV(]rW(KKe]rX(KKe]rY(KKe]rZ(KKe]r[(K 
Ke]r\(K Ke]r](KKe]r^(KKe]r_(K#Ke]r`(K$Ke]ra(K%Ke]rb(K&Ke]rc(K'KeerdRrerfe]rg(MU]rh(]riK!Krja]rkKKrla]rm(K-KrnKJKroe]rpKLKrqa]rrKKrsaeh]rt]ru(K!KearvRrwrxe]ry(MV]rz(]r{KfKr|a]r}K1Kr~a]rKKraeh]r]r(KfKearRrre]r(MW]r(]rKKra]r(KKrKKreeh]r(]r(KKe]r(KKe]r(KKe]r(KKe]r(K Ke]r(K Ke]r(K#Ke]r(K$Ke]r(K%Ke]r(K&Ke]r(K'KeerRrre]r(MX]r(]r(KKrKhKre]rK0Kra]rKKraeh]r(]r(KKe]r(KKe]r(KKe]r(KKe]r(K Ke]r(K Ke]r(KKe]r(KKe]r(KKe]r(K#Ke]r(K$Ke]r(K%Ke]r(K&Ke]r(K'KeerRrre]r(MY]r(]rK"Kra]r(KKrKKre]rKKraeh]r]r(K"KearRrre]r(MZ]r(]rK7Kra]rKKraeh]r]r(K"KearRrreerRre]r(Ukeywordsrh]r(]r(UandrK+e]r(UasrKfe]r(UassertrK e]r(UbreakrK e]r(UclassrKe]r(UcontinuerKe]r(UdefrKe]r(UdelrKe]r(UelifrKxe]r(UelserKue]r(UexceptrKie]r(UexecrKe]r(UfinallyrKe]r(UforrKe]r(UfromrKe]r(UglobalrKe]r(UifrKe]r(UimportrKe]r(UinrKNe]r(UisrKYe]r(UlambdarKe]r(Unonlocalr Ke]r (Unotr Ke]r (Uorr Ke]r (Upassr Ke]r (Uprintr Ke]r (Uraiser Ke]r (Ureturnr Ke]r (Utryr Ke]r (Uwhiler K e]r (Uwithr K!e]r (Uyieldr K"eer Rr e]r (Ulabelsr ]r (KUEMPTYr r KNr KNr M>Nr KNr KNr KNr! KNr" KNr# K2Nr$ K Nr% KNr& Kjr' Kjr( Kjr) Kjr* Kjr+ Kjr, Kjr- Kjr. Kjr/ Kjr0 Kjr1 Kjr2 Kjr3 Kj r4 Kj r5 Kj r6 Kj r7 Kj r8 Kj r9 Kj r: Kj r; Kj r< Kj r= KNr> K Nr? KNr@ KNrA KNrB M8NrC KNrD M.NrE KjrF MNrG K NrH K$NrI M=NrJ MCNrK MNrL KNrM M NrN MBNrO KNrP MFNrQ MYNrR K NrS M-NrT MENrU KNrV MNrW K)NrX K*NrY K/NrZ K'Nr[ K%Nr\ K&Nr] K1Nr^ K(Nr_ K-Nr` K.Nra K3Nrb K,Nrc K+Nrd K Nre MNrf MANrg M Nrh Kjri MGNrj M Nrk M0Nrl M Nrm KNrn KNro KNrp KNrq KNrr KNrs Kjrt MNru M Nrv MNrw M#Nrx M$Nry M&Nrz MMNr{ MSNr| MUNr} MNr~ MNr MNr Kjr MNr MDNr Kjr MWNr KNr MHNr MNr M4Nr M!Nr M Nr MNr M6Nr M7Nr MZNr Kjr M2Nr K7Nr Kjr M'Nr M(Nr MNr M)Nr M*Nr MONr MNr M/Nr M1Nr MNr Kj r MNNr MNr MLNr K#Nr MNr K"Nr M<Nr K Nr MNr M9Nr MNr MNr MNr MNr M"Nr M%Nr M+Nr M3Nr M5Nr M;Nr M?Nr KNr KNr KNr KNr K0Nr M,Nr MKNr MJNr MINr M@Nr Kjr MNr MPNr MRNr MQNr MTNr MNr K!Nr MXNr ee]r (U number2symbolr h]r (]r (MU file_inputr e]r (MUand_exprr e]r (MUand_testr e]r (MUarglistr e]r (MUargumentr e]r (MU arith_exprr e]r (MU assert_stmtr e]r (MUatomr e]r (MU augassignr e]r (M U break_stmtr e]r (M Uclassdefr e]r (M Ucomp_forr e]r (M Ucomp_ifr e]r (M U comp_iterr e]r (MUcomp_opr e]r (MU comparisonr e]r (MU compound_stmtr e]r (MU continue_stmtr e]r (MU decoratedr e]r (MU decoratorr e]r (MU decoratorsr e]r (MUdel_stmtr e]r (MU dictsetmakerr e]r (MUdotted_as_namer e]r (MUdotted_as_namesr e]r (MU dotted_namer e]r (MU encoding_declr e]r (MU eval_inputr e]r (MU except_clauser e]r (MU exec_stmtr e]r (MUexprr e]r (MU expr_stmtr e]r (M Uexprlistr e]r (M!Ufactorr e]r (M"U flow_stmtr e]r (M#Ufor_stmtr e]r (M$Ufuncdefr e]r (M%U global_stmtr e]r (M&Uif_stmtr e]r (M'Uimport_as_namer e]r (M(Uimport_as_namesr e]r (M)U import_fromr e]r (M*U import_namer e]r (M+U import_stmtr! e]r" (M,Ulambdefr# e]r$ (M-U listmakerr% e]r& (M.Unot_testr' e]r( (M/U old_lambdefr) e]r* (M0Uold_testr+ e]r, (M1Uor_testr- e]r. (M2U parametersr/ e]r0 (M3U pass_stmtr1 e]r2 (M4Upowerr3 e]r4 (M5U print_stmtr5 e]r6 (M6U raise_stmtr7 e]r8 (M7U return_stmtr9 e]r: (M8U shift_exprr; e]r< (M9U simple_stmtr= e]r> (M:U single_inputr? 
e]r@ (M;UsliceoprA e]rB (M<U small_stmtrC e]rD (M=U star_exprrE e]rF (M>UstmtrG e]rH (M?U subscriptrI e]rJ (M@U subscriptlistrK e]rL (MAUsuiterM e]rN (MBUtermrO e]rP (MCUtestrQ e]rR (MDUtestlistrS e]rT (MEU testlist1rU e]rV (MFU testlist_gexprW e]rX (MGU testlist_saferY e]rZ (MHUtestlist_star_exprr[ e]r\ (MIUtfpdefr] e]r^ (MJUtfplistr_ e]r` (MKUtnamera e]rb (MLUtrailerrc e]rd (MMUtry_stmtre e]rf (MNU typedargslistrg e]rh (MOU varargslistri e]rj (MPUvfpdefrk e]rl (MQUvfplistrm e]rn (MRUvnamero e]rp (MSU while_stmtrq e]rr (MTU with_itemrs e]rt (MUU with_stmtru e]rv (MVUwith_varrw e]rx (MWUxor_exprry e]rz (MXU yield_argr{ e]r| (MYU yield_exprr} e]r~ (MZU yield_stmtr eer Rr e]r (Ustartr Me]r (Ustatesr ]r (]r (]r (KKr KKr KKr e]r KKr ae]r (]r K(Kr a]r (K)Kr KKr ee]r (]r K*Kr a]r (K+Kr KKr ee]r (]r K,Kr a]r (K-Kr KKr e]r (K,Kr KKr ee]r (]r (K.Kr K/Kr K0Kr e]r K1Kr a]r KKr a]r (K2Kr K3Kr KKr e]r K0Kr ae]r (]r K4Kr a]r (KKr KKr KKr ee]r (]r K Kr a]r K0Kr a]r (K-Kr KKr e]r K0Kr a]r KKr ae]r (]r (KKr KKr K Kr K Kr K#Kr K%Kr K&Kr K'Kr e]r (K5Kr K6Kr K7Kr e]r KK r a]r (K8Kr K9K r e]r K:K r a]r (K;Kr KKr K?Kr K@Kr KAKr KBKr KCKr KDKr KEKr KFKr KGKr KHKr KIKr e]r KKr ae]r (]r K Kr a]r KKr ae]r (]r KKr a]r K%Kr a]r (KKr KJKr e]r (K5Kr KKKr e]r KLKr a]r KJKr a]r K5Kr a]r KKr ae]r (]r KKr a]r KMKr a]r KNKr a]r KOKr a]r (KPKr KKr e]r! KKr" ae]r# (]r$ KKr% a]r& KQKr' a]r( (KPKr) KKr* e]r+ KKr, ae]r- (]r. (K3Kr/ KRKr0 e]r1 KKr2 ae]r3 (]r4 (KSKr5 KTKr6 KUKr7 KSKr8 KVKr9 KWKr: KXKr; KNKr< KYKr= KKr> e]r? KKr@ a]rA (KKrB KKrC e]rD KNKrE ae]rF (]rG K1KrH a]rI (KZKrJ KKrK ee]rL (]rM (K[KrN K\KrO K]KrP K^KrQ K_KrR K`KrS KaKrT KbKrU e]rV KKrW ae]rX (]rY KKrZ a]r[ KKr\ ae]r] (]r^ KcKr_ a]r` (K[Kra K^Krb e]rc KKrd ae]re (]rf K Krg a]rh KdKri a]rj (KKrk KKrl e]rm (K5Krn KKKro e]rp KKrq a]rr KKrs a]rt K5Kru ae]rv (]rw KeKrx a]ry (KeKrz KKr{ ee]r| (]r} KKr~ a]r KMKr a]r KKr ae]r (]r (K.Kr K/Kr K0Kr e]r K1Kr a]r (K-Kr K3Kr KKr e]r (K-Kr KJKr K3Kr KKr e]r (K-Kr K3Kr KKr e]r (K/K r K0K r KKr e]r KKr a]r K0Kr a]r (K.K r K0K r KKr e]r (K-Kr KK r e]r K1K r a]r KJK r a]r (K-Kr KK r e]r K0K r ae]r (]r KdKr a]r (KfKr KKr e]r K%Kr a]r KKr ae]r (]r KgKr a]r (K-Kr KKr ee]r (]r K%Kr a]r (KKr KKr ee]r (]r K%Kr a]r KKr ae]r (]r KhKr a]r (KKr KKr e]r KKr ae]r (]r KiKr a]r (K0Kr KKr e]r (K-Kr KfKr KKr e]r K0Kr a]r KKr ae]r (]r KKr a]r K1Kr a]r (KNKr KKr e]r K0Kr a]r (K-Kr KKr e]r K0Kr a]r KKr ae]r (]r KjKr a]r (KkKr KKr ee]r (]r KlKr a]r (K2Kr KmKr KKr e]r (KlKr K7Kr e]r (KhKr K7Kr e]r (K2Kr KKr e]r KKr ae]r (]r (K1Kr K/Kr e]r (K-Kr KKr e]r (K1Kr K/Kr KKr ee]r (]r (KKr KKr K$Kr KnKr e]r KoKr a]r KKr ae]r (]r (KpKr KqKr! KrKr" KsKr# KtKr$ e]r% KKr& ae]r' (]r( KKr) a]r* KMKr+ a]r, KNKr- a]r. KhKr/ a]r0 KJKr1 a]r2 KLKr3 a]r4 (KuKr5 KKr6 e]r7 KJKr8 a]r9 KLK r: a]r; KK r< ae]r= (]r> KKr? 
[pickled grammar tables: binary data omitted]
PatternGrammar.txt000064400000001431147204472210010233 0ustar00
# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

# A grammar to describe tree matching patterns.
# Not shown here:
# - 'TOKEN' stands for any token (leaf node)
# - 'any' stands for any node (leaf or interior)
# With 'any' we can still specify the sub-structure.

# The start symbol is 'Matcher'.
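# A hypothetical example pattern in this language, added for illustration
# (it is not part of the original file): it binds a NAME unit to `name`,
# uses Details for the trailer sub-structure, and a Repeater for trailing
# siblings:
#
#   power< name='spam' trailer< '(' [any] ')' > any* >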
Matcher: Alternatives ENDMARKER

Alternatives: Alternative ('|' Alternative)*

Alternative: (Unit | NegatedUnit)+

Unit: [NAME '='] ( STRING [Repeater]
                 | NAME [Details] [Repeater]
                 | '(' Alternatives ')' [Repeater]
                 | '[' Alternatives ']'
                 )

NegatedUnit: 'not' (STRING | NAME [Details] | '(' Alternatives ')')

Repeater: '*' | '+' | '{' NUMBER [',' NUMBER] '}'

Details: '<' Alternatives '>'
PatternGrammar2.7.18.final.0.pickle000064400000005357147204472210012600 0ustar00
[pickled pattern-grammar tables: binary data omitted]
__init__.py000064400000000007147204472210006655 0ustar00
#empty
__init__.pyc000064400000000177147204472210007030 0ustar00
[compiled bytecode: binary data omitted]
__init__.pyo000064400000000177147204472210007044 0ustar00
[compiled bytecode: binary data omitted]
__main__.py000064400000000103147204472210006633 0ustar00
import sys
from .main import main

sys.exit(main("lib2to3.fixes"))
__main__.pyc000064400000000362147204472210007005 0ustar00
[compiled bytecode: binary data omitted]
__main__.pyo000064400000000362147204472210007021 0ustar00
[compiled bytecode: binary data omitted]
btm_matcher.py000064400000015262147204472210007414 0ustar00
"""A bottom-up tree matching algorithm implementation meant to speed
up 2to3's matching process. After the tree patterns are reduced to
their rarest linear path, a linear Aho-Corasick automaton is
created. The linear automaton traverses the linear paths from the
leaves to the root of the AST and returns a set of nodes for further
matching. This reduces significantly the number of candidate nodes."""

__author__ = "George Boutsioukis <gboutsioukis@gmail.com>"

import logging
import itertools
from collections import defaultdict

from . import pytree
from .btm_utils import reduce_tree

class BMNode(object):
    """Class for a node of the Aho-Corasick automaton used in matching"""
    count = itertools.count()
    def __init__(self):
        self.transition_table = {}
        self.fixers = []
        self.id = next(BMNode.count)
        self.content = ''

class BottomMatcher(object):
    """The main matcher class. After instantiating the patterns should
    be added using the add_fixer method"""

    def __init__(self):
        self.match = set()
        self.root = BMNode()
        self.nodes = [self.root]
        self.fixers = []
        self.logger = logging.getLogger("RefactoringTool")

    def add_fixer(self, fixer):
        """Reduces a fixer's pattern tree to a linear path and adds it
        to the matcher (a common Aho-Corasick automaton). The fixer is
        appended on the matching states and called when they are
        reached"""
        self.fixers.append(fixer)
        tree = reduce_tree(fixer.pattern_tree)
        linear = tree.get_linear_subpattern()
        match_nodes = self.add(linear, start=self.root)
        for match_node in match_nodes:
            match_node.fixers.append(fixer)

    def add(self, pattern, start):
        "Recursively adds a linear pattern to the AC automaton"
        if not pattern:
            #empty pattern
            return [start]
        if isinstance(pattern[0], tuple):
            #alternatives
            match_nodes = []
            for alternative in pattern[0]:
                #add all alternatives, and add the rest of the pattern
                #to each end node
                end_nodes = self.add(alternative, start=start)
                for end in end_nodes:
                    match_nodes.extend(self.add(pattern[1:], end))
            return match_nodes
        else:
            #single token
            if pattern[0] not in start.transition_table:
                #transition did not exist, create new
                next_node = BMNode()
                start.transition_table[pattern[0]] = next_node
            else:
                #transition exists already, follow
                next_node = start.transition_table[pattern[0]]

            if pattern[1:]:
                end_nodes = self.add(pattern[1:], start=next_node)
            else:
                end_nodes = [next_node]
            return end_nodes

    def run(self, leaves):
        """The main interface with the bottom matcher. The tree is
        traversed from the bottom using the constructed
        automaton. Nodes are only checked once as the tree is
        retraversed. When the automaton fails, we give it one more
        shot (in case the above tree matches as a whole with the
        rejected leaf), then we break for the next leaf.
        There is the special case of multiple arguments (see code
        comments) where we recheck the nodes

        Args:
           The leaves of the AST tree to be matched

        Returns:
           A dictionary of node matches with fixers as the keys
        """
        current_ac_node = self.root
        results = defaultdict(list)
        for leaf in leaves:
            current_ast_node = leaf
            while current_ast_node:
                current_ast_node.was_checked = True
                for child in current_ast_node.children:
                    # multiple statements, recheck
                    if isinstance(child, pytree.Leaf) and child.value == u";":
                        current_ast_node.was_checked = False
                        break
                if current_ast_node.type == 1:
                    #name
                    node_token = current_ast_node.value
                else:
                    node_token = current_ast_node.type

                if node_token in current_ac_node.transition_table:
                    #token matches
                    current_ac_node = current_ac_node.transition_table[node_token]
                    for fixer in current_ac_node.fixers:
                        if not fixer in results:
                            results[fixer] = []
                        results[fixer].append(current_ast_node)

                else:
                    #matching failed, reset automaton
                    current_ac_node = self.root
                    if (current_ast_node.parent is not None
                        and current_ast_node.parent.was_checked):
                        #the rest of the tree upwards has been checked, next leaf
                        break

                    #recheck the rejected node once from the root
                    if node_token in current_ac_node.transition_table:
                        #token matches
                        current_ac_node = current_ac_node.transition_table[node_token]
                        for fixer in current_ac_node.fixers:
                            if not fixer in results.keys():
                                results[fixer] = []
                            results[fixer].append(current_ast_node)

                current_ast_node = current_ast_node.parent
        return results

    def print_ac(self):
        "Prints a graphviz diagram of the BM automaton (for debugging)"
        print("digraph g{")
        def print_node(node):
            for subnode_key in node.transition_table.keys():
                subnode = node.transition_table[subnode_key]
                print("%d -> %d [label=%s] //%s" %
                      (node.id, subnode.id, type_repr(subnode_key),
                       str(subnode.fixers)))
                if subnode_key == 1:
                    print(subnode.content)
                print_node(subnode)
        print_node(self.root)
        print("}")

# taken from pytree.py for debugging; only used by print_ac
_type_reprs = {}
def type_repr(type_num):
    global _type_reprs
    if not _type_reprs:
        from .pygram import python_symbols
        # printing tokens is possible but not as useful
        # from .pgen2 import token // token.__dict__.items():
        for name, val in python_symbols.__dict__.items():
            if type(val) == int: _type_reprs[val] = name
    return _type_reprs.setdefault(type_num, type_num)
btm_matcher.pyc000064400000013306147204472210007554 0ustar00
[compiled bytecode: binary data omitted]
btm_matcher.pyo000064400000013306147204472210007570 0ustar00
[compiled bytecode: binary data omitted]
btm_utils.py000064400000023434147204472210007131 0ustar00
"Utility functions used by the btm_matcher module"

from . import pytree
from .pgen2 import grammar, token
from .pygram import pattern_symbols, python_symbols

syms = pattern_symbols
pysyms = python_symbols
tokens = grammar.opmap
token_labels = token

TYPE_ANY = -1
TYPE_ALTERNATIVES = -2
TYPE_GROUP = -3

class MinNode(object):
    """This class serves as an intermediate representation of the
    pattern tree during the conversion to sets of leaf-to-root
    subpatterns"""

    def __init__(self, type=None, name=None):
        self.type = type
        self.name = name
        self.children = []
        self.leaf = False
        self.parent = None
        self.alternatives = []
        self.group = []

    def __repr__(self):
        return str(self.type) + ' ' + str(self.name)

    def leaf_to_root(self):
        """Internal method. Returns a characteristic path of the
        pattern tree. This method must be run for all leaves until the
        linear subpatterns are merged into a single one"""
        node = self
        subp = []
        while node:
            if node.type == TYPE_ALTERNATIVES:
                node.alternatives.append(subp)
                if len(node.alternatives) == len(node.children):
                    #last alternative
                    subp = [tuple(node.alternatives)]
                    node.alternatives = []
                    node = node.parent
                    continue
                else:
                    node = node.parent
                    subp = None
                    break

            if node.type == TYPE_GROUP:
                node.group.append(subp)
                #probably should check the number of leaves
                if len(node.group) == len(node.children):
                    subp = get_characteristic_subpattern(node.group)
                    node.group = []
                    node = node.parent
                    continue
                else:
                    node = node.parent
                    subp = None
                    break

            if node.type == token_labels.NAME and node.name:
                #in case of type=name, use the name instead
                subp.append(node.name)
            else:
                subp.append(node.type)

            node = node.parent
        return subp

    def get_linear_subpattern(self):
        """Drives the leaf_to_root method. The reason that
        leaf_to_root must be run multiple times is because we need to
        reject 'group' matches; for example the alternative form
        (a | b c) creates a group [b c] that needs to be matched. Since
        matching multiple linear patterns overcomes the automaton's
        capabilities, leaf_to_root merges each group into a single
        choice based on 'characteristic'ity,

        i.e. (a|b c) -> (a|b) if b is more characteristic than c

        Returns: The most 'characteristic' (as defined by
          get_characteristic_subpattern) path for the compiled pattern
          tree.
        """

        for l in self.leaves():
            subp = l.leaf_to_root()
            if subp:
                return subp

    def leaves(self):
        "Generator that returns the leaves of the tree"
        for child in self.children:
            for x in child.leaves():
                yield x
        if not self.children:
            yield self

def reduce_tree(node, parent=None):
    """
    Internal function. Reduces a compiled pattern tree to an
    intermediate representation suitable for feeding the
    automaton. This also trims off any optional pattern elements (like
    [a], a*).
""" new_node = None #switch on the node type if node.type == syms.Matcher: #skip node = node.children[0] if node.type == syms.Alternatives : #2 cases if len(node.children) <= 2: #just a single 'Alternative', skip this node new_node = reduce_tree(node.children[0], parent) else: #real alternatives new_node = MinNode(type=TYPE_ALTERNATIVES) #skip odd children('|' tokens) for child in node.children: if node.children.index(child)%2: continue reduced = reduce_tree(child, new_node) if reduced is not None: new_node.children.append(reduced) elif node.type == syms.Alternative: if len(node.children) > 1: new_node = MinNode(type=TYPE_GROUP) for child in node.children: reduced = reduce_tree(child, new_node) if reduced: new_node.children.append(reduced) if not new_node.children: # delete the group if all of the children were reduced to None new_node = None else: new_node = reduce_tree(node.children[0], parent) elif node.type == syms.Unit: if (isinstance(node.children[0], pytree.Leaf) and node.children[0].value == '('): #skip parentheses return reduce_tree(node.children[1], parent) if ((isinstance(node.children[0], pytree.Leaf) and node.children[0].value == '[') or (len(node.children)>1 and hasattr(node.children[1], "value") and node.children[1].value == '[')): #skip whole unit if its optional return None leaf = True details_node = None alternatives_node = None has_repeater = False repeater_node = None has_variable_name = False for child in node.children: if child.type == syms.Details: leaf = False details_node = child elif child.type == syms.Repeater: has_repeater = True repeater_node = child elif child.type == syms.Alternatives: alternatives_node = child if hasattr(child, 'value') and child.value == '=': # variable name has_variable_name = True #skip variable name if has_variable_name: #skip variable name, '=' name_leaf = node.children[2] if hasattr(name_leaf, 'value') and name_leaf.value == '(': # skip parenthesis name_leaf = node.children[3] else: name_leaf = node.children[0] #set node type if name_leaf.type == token_labels.NAME: #(python) non-name or wildcard if name_leaf.value == 'any': new_node = MinNode(type=TYPE_ANY) else: if hasattr(token_labels, name_leaf.value): new_node = MinNode(type=getattr(token_labels, name_leaf.value)) else: new_node = MinNode(type=getattr(pysyms, name_leaf.value)) elif name_leaf.type == token_labels.STRING: #(python) name or character; remove the apostrophes from #the string value name = name_leaf.value.strip("'") if name in tokens: new_node = MinNode(type=tokens[name]) else: new_node = MinNode(type=token_labels.NAME, name=name) elif name_leaf.type == syms.Alternatives: new_node = reduce_tree(alternatives_node, parent) #handle repeaters if has_repeater: if repeater_node.children[0].value == '*': #reduce to None new_node = None elif repeater_node.children[0].value == '+': #reduce to a single occurrence i.e. 
            else:
                #TODO: handle {min, max} repeaters
                raise NotImplementedError
                pass

        #add children
        if details_node and new_node is not None:
            for child in details_node.children[1:-1]:
                #skip '<', '>' markers
                reduced = reduce_tree(child, new_node)
                if reduced is not None:
                    new_node.children.append(reduced)
    if new_node:
        new_node.parent = parent
    return new_node


def get_characteristic_subpattern(subpatterns):
    """Picks the most characteristic from a list of linear patterns
    Current order used is:
    names > common_names > common_chars
    """
    if not isinstance(subpatterns, list):
        return subpatterns
    if len(subpatterns)==1:
        return subpatterns[0]

    # first pick out the ones containing variable names
    subpatterns_with_names = []
    subpatterns_with_common_names = []
    common_names = ['in', 'for', 'if', 'not', 'None']
    subpatterns_with_common_chars = []
    common_chars = "[]().,:"
    for subpattern in subpatterns:
        if any(rec_test(subpattern, lambda x: type(x) is str)):
            if any(rec_test(subpattern,
                            lambda x: isinstance(x, str) and x in common_chars)):
                subpatterns_with_common_chars.append(subpattern)
            elif any(rec_test(subpattern,
                              lambda x: isinstance(x, str) and x in common_names)):
                subpatterns_with_common_names.append(subpattern)
            else:
                subpatterns_with_names.append(subpattern)

    if subpatterns_with_names:
        subpatterns = subpatterns_with_names
    elif subpatterns_with_common_names:
        subpatterns = subpatterns_with_common_names
    elif subpatterns_with_common_chars:
        subpatterns = subpatterns_with_common_chars
    # of the remaining subpatterns pick out the longest one
    return max(subpatterns, key=len)

def rec_test(sequence, test_func):
    """Tests test_func on all items of sequence and items of included
    sub-iterables"""
    for x in sequence:
        if isinstance(x, (list, tuple)):
            for y in rec_test(x, test_func):
                yield y
        else:
            yield test_func(x)
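# A minimal illustration (assumption: run as a script; this block is not
# part of the original module). Subpatterns containing ordinary names beat
# ones made only of common punctuation, and the longest surviving
# candidate wins.
if __name__ == "__main__":
    candidates = [[1, "join"], ["(", ")"], [2]]
    assert get_characteristic_subpattern(candidates) == [1, "join"]
    # with no names anywhere, length is the tie-breaker
    assert get_characteristic_subpattern([[1], [1, 2]]) == [1, 2]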
btm_utils.pyc000064400000016613147204472210007275 0ustar00
[compiled bytecode: binary data omitted]
btm_utils.pyo000064400000016613147204472210007311 0ustar00
[compiled bytecode: binary data omitted]
The reason that leaf_to_root must be run multiple times is because we need to reject 'group' matches; for example the alternative form (a | b c) creates a group [b c] that needs to be matched. Since matching multiple linear patterns overcomes the automaton's capabilities, leaf_to_root merges each group into a single choice based on 'characteristic'ity, i.e. (a|b c) -> (a|b) if b more characteristic than c Returns: The most 'characteristic'(as defined by get_characteristic_subpattern) path for the compiled pattern tree. N(tleavesR(RtlR((s)/usr/lib64/python2.7/lib2to3/btm_utils.pytget_linear_subpatternKs ccsEx-|jD]"}x|jD] }|VqWq W|jsA|VndS(s-Generator that returns the leaves of the treeN(RR(Rtchildtx((s)/usr/lib64/python2.7/lib2to3/btm_utils.pyR`s   N( t__name__t __module__t__doc__R RRRR!R(((s)/usr/lib64/python2.7/lib2to3/btm_utils.pyRs   * c Csd}|jtjkr(|jd}n|jtjkrt|jdkrht|jd|}qtdt }x|jD]P}|jj |drqnt||}|dk r|jj |qqWn$|jtj krxt|jdkr_tdt }x9|jD].}t||}|r|jj |qqW|jsud}quqt|jd|}n|jtjkrt|jdtjr|jdjdkrt|jd|St|jdtjr|jdjdks=t|jdkrAt|jddrA|jdjdkrAdSt}d}d}t}d} t} x|jD]}|jtjkrt}|}n<|jtjkrt}|} n|jtjkr|}nt|dro|jdkrot} qoqoW| rA|jd} t| drN| jdkrN|jd } qNn |jd} | jtjkr| jd krtdt}qTtt| jrtdtt| j}qTtdtt| j}n| jtjkr0| jjd } | tkrtdt| }qTtdtjd | }n$| jtjkrTt||}n|r| jdjd kryd}q| jdjdkrqt n|r|dk rxI|jdd!D]4}t||}|dk r|jj |qqWqn|r||_!n|S(s Internal function. Reduces a compiled pattern tree to an intermediate representation suitable for feeding the automaton. This also trims off any optional pattern elements(like [a], a*). iiRit(t[tvaluet=itanyt'Rt*t+iN("R RtsymstMatcherRt AlternativesRt reduce_treeRRtindexRt AlternativeRtUnitt isinstanceRtLeafR)thasattrtTrueR tDetailstRepeaterRRtTYPE_ANYtgetattrtpysymstSTRINGtstripttokenstNotImplementedErrorR ( RR tnew_nodeR"treducedR t details_nodetalternatives_nodet has_repeatert repeater_nodethas_variable_namet name_leafR((s)/usr/lib64/python2.7/lib2to3/btm_utils.pyR2hs             cs,t|ts|St|dkr-|dSg}g}dddddgg}dx|D]}tt|d ratt|fd r|j|qtt|fd r|j|q|j|qaqaW|r|}n|r |}n|r|}nt|d tS( sPicks the most characteristic from a list of linear patterns Current order used is: names > common_names > common_chars iitintfortiftnotR s[]().,:cSst|tkS(N(RR(R#((s)/usr/lib64/python2.7/lib2to3/btm_utils.pyttcst|to|kS(N(R6R(R#(t common_chars(s)/usr/lib64/python2.7/lib2to3/btm_utils.pyRORPcst|to|kS(N(R6R(R#(t common_names(s)/usr/lib64/python2.7/lib2to3/btm_utils.pyRORPtkey(R6tlistRR+trec_testRtmax(t subpatternstsubpatterns_with_namestsubpatterns_with_common_namestsubpatterns_with_common_charst subpattern((RQRRs)/usr/lib64/python2.7/lib2to3/btm_utils.pyRs2      ccsWxP|D]H}t|ttfrDx*t||D] }|Vq2Wq||VqWdS(sPTests test_func on all items of sequence and items of included sub-iterablesN(R6RTRRU(tsequencet test_funcR#ty((s)/usr/lib64/python2.7/lib2to3/btm_utils.pyRUs   N(R&RPRtpgen2RRtpygramRRR/R>topmapRARR<RRtobjectRR R2RRU(((s)/usr/lib64/python2.7/lib2to3/btm_utils.pyts X %fixer_base.py000064400000015174147204472210007240 0ustar00# Copyright 2006 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Base class for fixers (optional, but recommended).""" # Python imports import itertools # Local imports from .patcomp import PatternCompiler from . import pygram from .fixer_util import does_tree_import class BaseFix(object): """Optional base class for fixers. The subclass name must be FixFooBar where FooBar is the result of removing underscores and capitalizing the words of the fix name. For example, the class name for a fixer named 'has_key' should be FixHasKey. 
""" PATTERN = None # Most subclasses should override with a string literal pattern = None # Compiled pattern, set by compile_pattern() pattern_tree = None # Tree representation of the pattern options = None # Options object passed to initializer filename = None # The filename (set by set_filename) logger = None # A logger (set by set_filename) numbers = itertools.count(1) # For new_name() used_names = set() # A set of all used NAMEs order = "post" # Does the fixer prefer pre- or post-order traversal explicit = False # Is this ignored by refactor.py -f all? run_order = 5 # Fixers will be sorted by run order before execution # Lower numbers will be run first. _accept_type = None # [Advanced and not public] This tells RefactoringTool # which node type to accept when there's not a pattern. keep_line_order = False # For the bottom matcher: match with the # original line order BM_compatible = False # Compatibility with the bottom matching # module; every fixer should set this # manually # Shortcut for access to Python grammar symbols syms = pygram.python_symbols def __init__(self, options, log): """Initializer. Subclass may override. Args: options: a dict containing the options passed to RefactoringTool that could be used to customize the fixer through the command line. log: a list to append warnings and other messages to. """ self.options = options self.log = log self.compile_pattern() def compile_pattern(self): """Compiles self.PATTERN into self.pattern. Subclass may override if it doesn't want to use self.{pattern,PATTERN} in .match(). """ if self.PATTERN is not None: PC = PatternCompiler() self.pattern, self.pattern_tree = PC.compile_pattern(self.PATTERN, with_tree=True) def set_filename(self, filename): """Set the filename, and a logger derived from it. The main refactoring tool should call this. """ self.filename = filename def match(self, node): """Returns match for a given parse tree node. Should return a true or false object (not necessarily a bool). It may return a non-empty dict of matching sub-nodes as returned by a matching pattern. Subclass may override. """ results = {"node": node} return self.pattern.match(node, results) and results def transform(self, node, results): """Returns the transformation for a given parse tree node. Args: node: the root of the parse tree that matched the fixer. results: a dict mapping symbolic names to part of the match. Returns: None, or a node that is a modified copy of the argument node. The node argument may also be modified in-place to effect the same change. Subclass *must* override. """ raise NotImplementedError() def new_name(self, template=u"xxx_todo_changeme"): """Return a string suitable for use as an identifier The new name is guaranteed not to conflict with other identifiers. """ name = template while name in self.used_names: name = template + unicode(self.numbers.next()) self.used_names.add(name) return name def log_message(self, message): if self.first_log: self.first_log = False self.log.append("### In file %s ###" % self.filename) self.log.append(message) def cannot_convert(self, node, reason=None): """Warn the user that a given chunk of code is not valid Python 3, but that it cannot be converted automatically. First argument is the top-level node for the code in question. Optional second argument is why it can't be converted. 
""" lineno = node.get_lineno() for_output = node.clone() for_output.prefix = u"" msg = "Line %d: could not convert: %s" self.log_message(msg % (lineno, for_output)) if reason: self.log_message(reason) def warning(self, node, reason): """Used for warning the user about possible uncertainty in the translation. First argument is the top-level node for the code in question. Optional second argument is why it can't be converted. """ lineno = node.get_lineno() self.log_message("Line %d: %s" % (lineno, reason)) def start_tree(self, tree, filename): """Some fixers need to maintain tree-wide state. This method is called once, at the start of tree fix-up. tree - the root node of the tree to be processed. filename - the name of the file the tree came from. """ self.used_names = tree.used_names self.set_filename(filename) self.numbers = itertools.count(1) self.first_log = True def finish_tree(self, tree, filename): """Some fixers need to maintain tree-wide state. This method is called once, at the conclusion of tree fix-up. tree - the root node of the tree to be processed. filename - the name of the file the tree came from. """ pass class ConditionalFix(BaseFix): """ Base class for fixers which not execute if an import is found. """ # This is the name of the import which, if found, will cause the test to be skipped skip_on = None def start_tree(self, *args): super(ConditionalFix, self).start_tree(*args) self._should_skip = None def should_skip(self, node): if self._should_skip is not None: return self._should_skip pkg = self.skip_on.split(".") name = pkg[-1] pkg = ".".join(pkg[:-1]) self._should_skip = does_tree_import(pkg, name, node) return self._should_skip fixer_base.pyc000064400000016022147204472210007374 0ustar00 {fc@srdZddlZddlmZddlmZddlmZdefdYZ d e fd YZ dS( s2Base class for fixers (optional, but recommended).iNi(tPatternCompiler(tpygram(tdoes_tree_importtBaseFixcBseZdZdZdZdZdZdZdZ e j dZ e ZdZeZdZdZeZeZejZdZdZdZdZdZd d Zd Zdd Z d Z!dZ"dZ#RS(sOptional base class for fixers. The subclass name must be FixFooBar where FooBar is the result of removing underscores and capitalizing the words of the fix name. For example, the class name for a fixer named 'has_key' should be FixHasKey. itposticCs ||_||_|jdS(sInitializer. Subclass may override. Args: options: a dict containing the options passed to RefactoringTool that could be used to customize the fixer through the command line. log: a list to append warnings and other messages to. N(toptionstlogtcompile_pattern(tselfRR((s*/usr/lib64/python2.7/lib2to3/fixer_base.pyt__init__0s  cCsC|jdk r?t}|j|jdt\|_|_ndS(sCompiles self.PATTERN into self.pattern. Subclass may override if it doesn't want to use self.{pattern,PATTERN} in .match(). t with_treeN(tPATTERNtNoneRRtTruetpatternt pattern_tree(RtPC((s*/usr/lib64/python2.7/lib2to3/fixer_base.pyR<s cCs ||_dS(smSet the filename, and a logger derived from it. The main refactoring tool should call this. N(tfilename(RR((s*/usr/lib64/python2.7/lib2to3/fixer_base.pyt set_filenameGscCs&i|d6}|jj||o%|S(sReturns match for a given parse tree node. Should return a true or false object (not necessarily a bool). It may return a non-empty dict of matching sub-nodes as returned by a matching pattern. Subclass may override. tnode(Rtmatch(RRtresults((s*/usr/lib64/python2.7/lib2to3/fixer_base.pyRNs cCs tdS(sReturns the transformation for a given parse tree node. Args: node: the root of the parse tree that matched the fixer. results: a dict mapping symbolic names to part of the match. 
Returns: None, or a node that is a modified copy of the argument node. The node argument may also be modified in-place to effect the same change. Subclass *must* override. N(tNotImplementedError(RRR((s*/usr/lib64/python2.7/lib2to3/fixer_base.pyt transformZsuxxx_todo_changemecCsI|}x,||jkr4|t|jj}q W|jj||S(sReturn a string suitable for use as an identifier The new name is guaranteed not to conflict with other identifiers. (t used_namestunicodetnumberstnexttadd(Rttemplatetname((s*/usr/lib64/python2.7/lib2to3/fixer_base.pytnew_namejs cCs@|jr,t|_|jjd|jn|jj|dS(Ns### In file %s ###(t first_logtFalseRtappendR(Rtmessage((s*/usr/lib64/python2.7/lib2to3/fixer_base.pyt log_messageus  cCsX|j}|j}d|_d}|j|||f|rT|j|ndS(sWarn the user that a given chunk of code is not valid Python 3, but that it cannot be converted automatically. First argument is the top-level node for the code in question. Optional second argument is why it can't be converted. usLine %d: could not convert: %sN(t get_linenotclonetprefixR$(RRtreasontlinenot for_outputtmsg((s*/usr/lib64/python2.7/lib2to3/fixer_base.pytcannot_convert{s   cCs'|j}|jd||fdS(sUsed for warning the user about possible uncertainty in the translation. First argument is the top-level node for the code in question. Optional second argument is why it can't be converted. s Line %d: %sN(R%R$(RRR(R)((s*/usr/lib64/python2.7/lib2to3/fixer_base.pytwarnings cCs8|j|_|j|tjd|_t|_dS(sSome fixers need to maintain tree-wide state. This method is called once, at the start of tree fix-up. tree - the root node of the tree to be processed. filename - the name of the file the tree came from. iN(RRt itertoolstcountRR R (RttreeR((s*/usr/lib64/python2.7/lib2to3/fixer_base.pyt start_trees  cCsdS(sSome fixers need to maintain tree-wide state. This method is called once, at the conclusion of tree fix-up. tree - the root node of the tree to be processed. filename - the name of the file the tree came from. N((RR0R((s*/usr/lib64/python2.7/lib2to3/fixer_base.pyt finish_treesN($t__name__t __module__t__doc__R R RRRRtloggerR.R/RtsetRtorderR!texplicitt run_ordert _accept_typetkeep_line_ordert BM_compatibleRtpython_symbolstsymsR RRRRRR$R,R-R1R2(((s*/usr/lib64/python2.7/lib2to3/fixer_base.pyRs6       tConditionalFixcBs&eZdZdZdZdZRS(s@ Base class for fixers which not execute if an import is found. cGs#tt|j|d|_dS(N(tsuperR@R1R t _should_skip(Rtargs((s*/usr/lib64/python2.7/lib2to3/fixer_base.pyR1scCsa|jdk r|jS|jjd}|d}dj|d }t||||_|jS(Nt.i(RBR tskip_ontsplittjoinR(RRtpkgR((s*/usr/lib64/python2.7/lib2to3/fixer_base.pyt should_skips N(R3R4R5R RER1RI(((s*/usr/lib64/python2.7/lib2to3/fixer_base.pyR@s ( R5R.tpatcompRtRt fixer_utilRtobjectRR@(((s*/usr/lib64/python2.7/lib2to3/fixer_base.pyts  fixer_base.pyo000064400000016022147204472210007410 0ustar00 {fc@srdZddlZddlmZddlmZddlmZdefdYZ d e fd YZ dS( s2Base class for fixers (optional, but recommended).iNi(tPatternCompiler(tpygram(tdoes_tree_importtBaseFixcBseZdZdZdZdZdZdZdZ e j dZ e ZdZeZdZdZeZeZejZdZdZdZdZdZd d Zd Zdd Z d Z!dZ"dZ#RS(sOptional base class for fixers. The subclass name must be FixFooBar where FooBar is the result of removing underscores and capitalizing the words of the fix name. For example, the class name for a fixer named 'has_key' should be FixHasKey. itposticCs ||_||_|jdS(sInitializer. Subclass may override. Args: options: a dict containing the options passed to RefactoringTool that could be used to customize the fixer through the command line. log: a list to append warnings and other messages to. 
N(toptionstlogtcompile_pattern(tselfRR((s*/usr/lib64/python2.7/lib2to3/fixer_base.pyt__init__0s  cCsC|jdk r?t}|j|jdt\|_|_ndS(sCompiles self.PATTERN into self.pattern. Subclass may override if it doesn't want to use self.{pattern,PATTERN} in .match(). t with_treeN(tPATTERNtNoneRRtTruetpatternt pattern_tree(RtPC((s*/usr/lib64/python2.7/lib2to3/fixer_base.pyR<s cCs ||_dS(smSet the filename, and a logger derived from it. The main refactoring tool should call this. N(tfilename(RR((s*/usr/lib64/python2.7/lib2to3/fixer_base.pyt set_filenameGscCs&i|d6}|jj||o%|S(sReturns match for a given parse tree node. Should return a true or false object (not necessarily a bool). It may return a non-empty dict of matching sub-nodes as returned by a matching pattern. Subclass may override. tnode(Rtmatch(RRtresults((s*/usr/lib64/python2.7/lib2to3/fixer_base.pyRNs cCs tdS(sReturns the transformation for a given parse tree node. Args: node: the root of the parse tree that matched the fixer. results: a dict mapping symbolic names to part of the match. Returns: None, or a node that is a modified copy of the argument node. The node argument may also be modified in-place to effect the same change. Subclass *must* override. N(tNotImplementedError(RRR((s*/usr/lib64/python2.7/lib2to3/fixer_base.pyt transformZsuxxx_todo_changemecCsI|}x,||jkr4|t|jj}q W|jj||S(sReturn a string suitable for use as an identifier The new name is guaranteed not to conflict with other identifiers. (t used_namestunicodetnumberstnexttadd(Rttemplatetname((s*/usr/lib64/python2.7/lib2to3/fixer_base.pytnew_namejs cCs@|jr,t|_|jjd|jn|jj|dS(Ns### In file %s ###(t first_logtFalseRtappendR(Rtmessage((s*/usr/lib64/python2.7/lib2to3/fixer_base.pyt log_messageus  cCsX|j}|j}d|_d}|j|||f|rT|j|ndS(sWarn the user that a given chunk of code is not valid Python 3, but that it cannot be converted automatically. First argument is the top-level node for the code in question. Optional second argument is why it can't be converted. usLine %d: could not convert: %sN(t get_linenotclonetprefixR$(RRtreasontlinenot for_outputtmsg((s*/usr/lib64/python2.7/lib2to3/fixer_base.pytcannot_convert{s   cCs'|j}|jd||fdS(sUsed for warning the user about possible uncertainty in the translation. First argument is the top-level node for the code in question. Optional second argument is why it can't be converted. s Line %d: %sN(R%R$(RRR(R)((s*/usr/lib64/python2.7/lib2to3/fixer_base.pytwarnings cCs8|j|_|j|tjd|_t|_dS(sSome fixers need to maintain tree-wide state. This method is called once, at the start of tree fix-up. tree - the root node of the tree to be processed. filename - the name of the file the tree came from. iN(RRt itertoolstcountRR R (RttreeR((s*/usr/lib64/python2.7/lib2to3/fixer_base.pyt start_trees  cCsdS(sSome fixers need to maintain tree-wide state. This method is called once, at the conclusion of tree fix-up. tree - the root node of the tree to be processed. filename - the name of the file the tree came from. N((RR0R((s*/usr/lib64/python2.7/lib2to3/fixer_base.pyt finish_treesN($t__name__t __module__t__doc__R R RRRRtloggerR.R/RtsetRtorderR!texplicitt run_ordert _accept_typetkeep_line_ordert BM_compatibleRtpython_symbolstsymsR RRRRRR$R,R-R1R2(((s*/usr/lib64/python2.7/lib2to3/fixer_base.pyRs6       tConditionalFixcBs&eZdZdZdZdZRS(s@ Base class for fixers which not execute if an import is found. 
fixer_util.py000064400000034405147204472210007301 0ustar00
"""Utility functions, node construction macros, etc."""
# Author: Collin Winter

from itertools import islice

# Local imports
from .pgen2 import token
from .pytree import Leaf, Node
from .pygram import python_symbols as syms
from . import patcomp


###########################################################
### Common node-construction "macros"
###########################################################

def KeywordArg(keyword, value):
    return Node(syms.argument,
                [keyword, Leaf(token.EQUAL, u"="), value])

def LParen():
    return Leaf(token.LPAR, u"(")

def RParen():
    return Leaf(token.RPAR, u")")

def Assign(target, source):
    """Build an assignment statement"""
    if not isinstance(target, list):
        target = [target]
    if not isinstance(source, list):
        source.prefix = u" "
        source = [source]
    return Node(syms.atom,
                target + [Leaf(token.EQUAL, u"=", prefix=u" ")] + source)

def Name(name, prefix=None):
    """Return a NAME leaf"""
    return Leaf(token.NAME, name, prefix=prefix)

def Attr(obj, attr):
    """A node tuple for obj.attr"""
    return [obj, Node(syms.trailer, [Dot(), attr])]

def Comma():
    """A comma leaf"""
    return Leaf(token.COMMA, u",")

def Dot():
    """A period (.) leaf"""
    return Leaf(token.DOT, u".")

def ArgList(args, lparen=LParen(), rparen=RParen()):
    """A parenthesised argument list, used by Call()"""
    node = Node(syms.trailer, [lparen.clone(), rparen.clone()])
    if args:
        node.insert_child(1, Node(syms.arglist, args))
    return node

def Call(func_name, args=None, prefix=None):
    """A function call"""
    node = Node(syms.power, [func_name, ArgList(args)])
    if prefix is not None:
        node.prefix = prefix
    return node

def Newline():
    """A newline literal"""
    return Leaf(token.NEWLINE, u"\n")

def BlankLine():
    """A blank line"""
    return Leaf(token.NEWLINE, u"")

def Number(n, prefix=None):
    return Leaf(token.NUMBER, n, prefix=prefix)

def Subscript(index_node):
    """A numeric or string subscript"""
    return Node(syms.trailer, [Leaf(token.LBRACE, u"["),
                               index_node,
                               Leaf(token.RBRACE, u"]")])

def String(string, prefix=None):
    """A string leaf"""
    return Leaf(token.STRING, string, prefix=prefix)

def ListComp(xp, fp, it, test=None):
    """A list comprehension of the form [xp for fp in it if test].

    If test is None, the "if test" part is omitted.
    """
    xp.prefix = u""
    fp.prefix = u" "
    it.prefix = u" "
    for_leaf = Leaf(token.NAME, u"for")
    for_leaf.prefix = u" "
    in_leaf = Leaf(token.NAME, u"in")
    in_leaf.prefix = u" "
    inner_args = [for_leaf, fp, in_leaf, it]
    if test:
        test.prefix = u" "
        if_leaf = Leaf(token.NAME, u"if")
        if_leaf.prefix = u" "
        inner_args.append(Node(syms.comp_if, [if_leaf, test]))
    inner = Node(syms.listmaker, [xp, Node(syms.comp_for, inner_args)])
    return Node(syms.atom,
                       [Leaf(token.LBRACE, u"["),
                        inner,
                        Leaf(token.RBRACE, u"]")])

def FromImport(package_name, name_leafs):
    """ Return an import statement in the form:
        from package import name_leafs"""
    # XXX: May not handle dotted imports properly (eg, package_name='foo.bar')
    #assert package_name == '.' or '.' not in package_name, "FromImport has "\
    #       "not been tested with dotted package names -- use at your own "\
    #       "peril!"
    for leaf in name_leafs:
        # Pull the leaves out of their old tree
        leaf.remove()

    children = [Leaf(token.NAME, u"from"),
                Leaf(token.NAME, package_name, prefix=u" "),
                Leaf(token.NAME, u"import", prefix=u" "),
                Node(syms.import_as_names, name_leafs)]
    imp = Node(syms.import_from, children)
    return imp


###########################################################
### Determine whether a node represents a given literal
###########################################################

def is_tuple(node):
    """Does the node represent a tuple literal?"""
    if isinstance(node, Node) and node.children == [LParen(), RParen()]:
        return True
    return (isinstance(node, Node)
            and len(node.children) == 3
            and isinstance(node.children[0], Leaf)
            and isinstance(node.children[1], Node)
            and isinstance(node.children[2], Leaf)
            and node.children[0].value == u"("
            and node.children[2].value == u")")

def is_list(node):
    """Does the node represent a list literal?"""
    return (isinstance(node, Node)
            and len(node.children) > 1
            and isinstance(node.children[0], Leaf)
            and isinstance(node.children[-1], Leaf)
            and node.children[0].value == u"["
            and node.children[-1].value == u"]")


###########################################################
### Misc
###########################################################

def parenthesize(node):
    return Node(syms.atom, [LParen(), node, RParen()])


consuming_calls = set(["sorted", "list", "set", "any", "all", "tuple", "sum",
                       "min", "max", "enumerate"])

def attr_chain(obj, attr):
    """Follow an attribute chain.

    If you have a chain of objects where a.foo -> b, b.foo-> c, etc,
    use this to iterate over all objects in the chain. Iteration is
    terminated by getattr(x, attr) is None.

    Args:
        obj: the starting object
        attr: the name of the chaining attribute

    Yields:
        Each successive object in the chain.
    """
    next = getattr(obj, attr)
    while next:
        yield next
        next = getattr(next, attr)

p0 = """for_stmt< 'for' any 'in' node=any ':' any* >
        | comp_for< 'for' any 'in' node=any any* >
     """
p1 = """
power<
    ( 'iter' | 'list' | 'tuple' | 'sorted' | 'set' | 'sum' |
      'any' | 'all' | 'enumerate' | (any* trailer< '.' 'join' >) )
    trailer< '(' node=any ')' >
    any*
>
"""
p2 = """
power<
    ( 'sorted' | 'enumerate' )
    trailer< '(' arglist<node=any any*> ')' >
    any*
>
"""
pats_built = False
def in_special_context(node):
    """ Returns true if node is in an environment where all that is required
        of it is being iterable (ie, it doesn't matter if it returns a list
        or an iterator).

        See test_map_nochange in test_fixers.py for some examples and tests.
    """
    global p0, p1, p2, pats_built
    if not pats_built:
        p0 = patcomp.compile_pattern(p0)
        p1 = patcomp.compile_pattern(p1)
        p2 = patcomp.compile_pattern(p2)
        pats_built = True
    patterns = [p0, p1, p2]
    for pattern, parent in zip(patterns, attr_chain(node, "parent")):
        results = {}
        if pattern.match(parent, results) and results["node"] is node:
            return True
    return False

def is_probably_builtin(node):
    """
    Check that something isn't an attribute or function name etc.
    """
    prev = node.prev_sibling
    if prev is not None and prev.type == token.DOT:
        # Attribute lookup.
        return False
    parent = node.parent
    if parent.type in (syms.funcdef, syms.classdef):
        return False
    if parent.type == syms.expr_stmt and parent.children[0] is node:
        # Assignment.
        return False
    if parent.type == syms.parameters or \
            (parent.type == syms.typedargslist and (
            (prev is not None and prev.type == token.COMMA) or
            parent.children[0] is node
            )):
        # The name of an argument.
        return False
    return True

def find_indentation(node):
    """Find the indentation of *node*."""
    while node is not None:
        if node.type == syms.suite and len(node.children) > 2:
            indent = node.children[1]
            if indent.type == token.INDENT:
                return indent.value
        node = node.parent
    return u""


###########################################################
### The following functions are to find bindings in a suite
###########################################################

def make_suite(node):
    if node.type == syms.suite:
        return node
    node = node.clone()
    parent, node.parent = node.parent, None
    suite = Node(syms.suite, [node])
    suite.parent = parent
    return suite

def find_root(node):
    """Find the top level namespace."""
    # Scamper up to the top level namespace
    while node.type != syms.file_input:
        node = node.parent
        if not node:
            raise ValueError("root found before file_input node was found.")
    return node

def does_tree_import(package, name, node):
    """ Returns true if name is imported from package at the
        top level of the tree which node belongs to.
        To cover the case of an import like 'import foo', use
        None for the package and 'foo' for the name. """
    binding = find_binding(name, find_root(node), package)
    return bool(binding)

def is_import(node):
    """Returns true if the node is an import statement."""
    return node.type in (syms.import_name, syms.import_from)

def touch_import(package, name, node):
    """ Works like `does_tree_import` but adds an import statement
        if it was not imported. """
    def is_import_stmt(node):
        return (node.type == syms.simple_stmt and node.children and
                is_import(node.children[0]))

    root = find_root(node)

    if does_tree_import(package, name, root):
        return

    # figure out where to insert the new import.  First try to find
    # the first import and then skip to the last one.
    insert_pos = offset = 0
    for idx, node in enumerate(root.children):
        if not is_import_stmt(node):
            continue
        for offset, node2 in enumerate(root.children[idx:]):
            if not is_import_stmt(node2):
                break
        insert_pos = idx + offset
        break

    # if there are no imports where we can insert, find the docstring.
    # if that also fails, we stick to the beginning of the file
    if insert_pos == 0:
        for idx, node in enumerate(root.children):
            if (node.type == syms.simple_stmt and node.children and
               node.children[0].type == token.STRING):
                insert_pos = idx + 1
                break

    if package is None:
        import_ = Node(syms.import_name, [
            Leaf(token.NAME, u"import"),
            Leaf(token.NAME, name, prefix=u" ")
        ])
    else:
        import_ = FromImport(package, [Leaf(token.NAME, name, prefix=u" ")])

    children = [import_, Newline()]
    root.insert_child(insert_pos, Node(syms.simple_stmt, children))


_def_syms = set([syms.classdef, syms.funcdef])
def find_binding(name, node, package=None):
    """ Returns the node which binds variable name, otherwise None.
        If optional argument package is supplied, only imports will
        be returned.
        See test cases for examples."""
    for child in node.children:
        ret = None
        if child.type == syms.for_stmt:
            if _find(name, child.children[1]):
                return child
            n = find_binding(name, make_suite(child.children[-1]), package)
            if n: ret = n
        elif child.type in (syms.if_stmt, syms.while_stmt):
            n = find_binding(name, make_suite(child.children[-1]), package)
            if n: ret = n
        elif child.type == syms.try_stmt:
            n = find_binding(name, make_suite(child.children[2]), package)
            if n:
                ret = n
            else:
                for i, kid in enumerate(child.children[3:]):
                    if kid.type == token.COLON and kid.value == ":":
                        # i+3 is the colon, i+4 is the suite
                        n = find_binding(name, make_suite(child.children[i+4]),
                                         package)
                        if n: ret = n
        elif child.type in _def_syms and child.children[1].value == name:
            ret = child
        elif _is_import_binding(child, name, package):
            ret = child
        elif child.type == syms.simple_stmt:
            ret = find_binding(name, child, package)
        elif child.type == syms.expr_stmt:
            if _find(name, child.children[0]):
                ret = child

        if ret:
            if not package:
                return ret
            if is_import(ret):
                return ret
    return None

_block_syms = set([syms.funcdef, syms.classdef, syms.trailer])
def _find(name, node):
    nodes = [node]
    while nodes:
        node = nodes.pop()
        if node.type > 256 and node.type not in _block_syms:
            nodes.extend(node.children)
        elif node.type == token.NAME and node.value == name:
            return node
    return None

def _is_import_binding(node, name, package=None):
    """ Will return node if node will import name, or node
        will import * from package.  None is returned otherwise.
        See test cases for examples. """

    if node.type == syms.import_name and not package:
        imp = node.children[1]
        if imp.type == syms.dotted_as_names:
            for child in imp.children:
                if child.type == syms.dotted_as_name:
                    if child.children[2].value == name:
                        return node
                elif child.type == token.NAME and child.value == name:
                    return node
        elif imp.type == syms.dotted_as_name:
            last = imp.children[-1]
            if last.type == token.NAME and last.value == name:
                return node
        elif imp.type == token.NAME and imp.value == name:
            return node
    elif node.type == syms.import_from:
        # unicode(...) is used to make life easier here, because
        # from a.b import parses to ['import', ['a', '.', 'b'], ...]
if package and unicode(node.children[1]).strip() != package: return None n = node.children[3] if package and _find(u"as", n): # See test_from_import_as for explanation return None elif n.type == syms.import_as_names and _find(name, n): return node elif n.type == syms.import_as_name: child = n.children[2] if child.type == token.NAME and child.value == name: return node elif n.type == token.NAME and n.value == name: return node elif package and n.type == token.STAR: return node return None fixer_util.pyc000064400000034533147204472210007446 0ustar00 {fc @sdZddlmZddlmZddlmZmZddlm Z ddl m Z dZ d Zd Zd Zd5d Zd ZdZdZeedZd5d5dZdZdZd5dZdZd5dZd5dZdZdZdZ dZ!e"ddddd d!d"d#d$d%g Z#d&Z$d'a%d(a&d)a'e(a)d*Z*d+Z+d,Z,d-Z-d.Z.d/Z/d0Z0d1Z1e"e j2e j3gZ4d5d2Z5e"e j3e j2e j6gZ7d3Z8d5d4Z9d5S(6s1Utility functions, node construction macros, etc.i(tislicei(ttoken(tLeaftNode(tpython_symbols(tpatcompcCs%ttj|ttjd|gS(Nu=(RtsymstargumentRRtEQUAL(tkeywordtvalue((s*/usr/lib64/python2.7/lib2to3/fixer_util.pyt KeywordArgs cCsttjdS(Nu((RRtLPAR(((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytLParenscCsttjdS(Nu)(RRtRPAR(((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytRParenscCslt|ts|g}nt|ts?d|_|g}nttj|ttjdddg|S(sBuild an assignment statementu u=tprefix( t isinstancetlistRRRtatomRRR(ttargettsource((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytAssigns    cCsttj|d|S(sReturn a NAME leafR(RRtNAME(tnameR((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytName&scCs|ttjt|ggS(sA node tuple for obj.attr(RRttrailertDot(tobjtattr((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytAttr*scCsttjdS(s A comma leafu,(RRtCOMMA(((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytComma.scCsttjdS(sA period (.) leafu.(RRtDOT(((s*/usr/lib64/python2.7/lib2to3/fixer_util.pyR2scCsMttj|j|jg}|rI|jdttj|n|S(s-A parenthesised argument list, used by Call()i(RRRtclonet insert_childtarglist(targstlparentrparentnode((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytArgList6s$cCs:ttj|t|g}|dk r6||_n|S(sA function callN(RRtpowerR)tNoneR(t func_nameR%RR(((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytCall=s  cCsttjdS(sA newline literalu (RRtNEWLINE(((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytNewlineDscCsttjdS(s A blank lineu(RRR.(((s*/usr/lib64/python2.7/lib2to3/fixer_util.pyt BlankLineHscCsttj|d|S(NR(RRtNUMBER(tnR((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytNumberLscCs1ttjttjd|ttjdgS(sA numeric or string subscriptu[u](RRRRRtLBRACEtRBRACE(t index_node((s*/usr/lib64/python2.7/lib2to3/fixer_util.pyt SubscriptOscCsttj|d|S(s A string leafR(RRtSTRING(tstringR((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytStringUsc Csd|_d|_d|_ttjd}d|_ttjd}d|_||||g}|rd|_ttjd}d|_|jttj||gnttj|ttj |g}ttj ttj d|ttj dgS(suA list comprehension of the form [xp for fp in it if test]. If test is None, the "if test" part is omitted. 
uu uforuinuifu[u]( RRRRtappendRRtcomp_ift listmakertcomp_forRR4R5( txptfptitttesttfor_leaftin_leaft inner_argstif_leaftinner((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytListCompYs$       "$ cCsx|D]}|jqWttjdttj|ddttjdddttj|g}ttj|}|S(sO Return an import statement in the form: from package import name_leafsufromRu uimport(tremoveRRRRRtimport_as_namest import_from(t package_namet name_leafstleaftchildrentimp((s*/usr/lib64/python2.7/lib2to3/fixer_util.pyt FromImportqs cCst|tr.|jttgkr.tSt|tot|jdkot|jdtot|jdtot|jdto|jdjdko|jdjdkS(s(Does the node represent a tuple literal?iiiiu(u)( RRROR RtTruetlenRR (R(((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytis_tuples*cCszt|toyt|jdkoyt|jdtoyt|jdtoy|jdjdkoy|jdjdkS(s'Does the node represent a list literal?iiiu[u](RRRSRORR (R(((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytis_lists cCsttjt|tgS(N(RRRR R(R(((s*/usr/lib64/python2.7/lib2to3/fixer_util.pyt parenthesizestsortedRtsettanytallttupletsumtmintmaxt enumerateccs4t||}x|r/|Vt||}qWdS(slFollow an attribute chain. If you have a chain of objects where a.foo -> b, b.foo-> c, etc, use this to iterate over all objects in the chain. Iteration is terminated by getattr(x, attr) is None. Args: obj: the starting object attr: the name of the chaining attribute Yields: Each successive object in the chain. N(tgetattr(RRtnext((s*/usr/lib64/python2.7/lib2to3/fixer_util.pyt attr_chains sefor_stmt< 'for' any 'in' node=any ':' any* > | comp_for< 'for' any 'in' node=any any* > s power< ( 'iter' | 'list' | 'tuple' | 'sorted' | 'set' | 'sum' | 'any' | 'all' | 'enumerate' | (any* trailer< '.' 'join' >) ) trailer< '(' node=any ')' > any* > s` power< ( 'sorted' | 'enumerate' ) trailer< '(' arglist ')' > any* > cCsts<tjtatjtatjtatantttg}xRt|t|dD]8\}}i}|j ||rd|d|krdtSqdWt S(s Returns true if node is in an environment where all that is required of it is being iterable (ie, it doesn't matter if it returns a list or an iterator). See test_map_nochange in test_fixers.py for some examples and tests. tparentR(( t pats_builtRtcompile_patterntp0tp1tp2RRtzipRbtmatchtFalse(R(tpatternstpatternRctresults((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytin_special_contexts %"cCs|j}|dk r+|jtjkr+tS|j}|jtjtj fkrStS|jtj kr||j d|kr|tS|jtj ks|jtj kr|dk r|jtjks|j d|krtStS(sG Check that something isn't an attribute or function name etc. iN(t prev_siblingR+ttypeRR!RkRcRtfuncdeftclassdeft expr_stmtROt parameterst typedargslistRRR(R(tprevRc((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytis_probably_builtins  %cCspxi|dk rk|jtjkr_t|jdkr_|jd}|jtjkr_|jSn|j }qWdS(sFind the indentation of *node*.iiuN( R+RqRtsuiteRSRORtINDENTR Rc(R(tindent((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytfind_indentations'   cCsW|jtjkr|S|j}|jd}|_ttj|g}||_|S(N(RqRRyR"RcR+R(R(RcRy((s*/usr/lib64/python2.7/lib2to3/fixer_util.pyt make_suites  cCs;x4|jtjkr6|j}|stdqqW|S(sFind the top level namespace.s,root found before file_input node was found.(RqRt file_inputRct ValueError(R(((s*/usr/lib64/python2.7/lib2to3/fixer_util.pyt find_roots  cCs"t|t||}t|S(s Returns true if name is imported from package at the top level of the tree which node belongs to. To cover the case of an import like 'import foo', use None for the package and 'foo' for the name. 
(t find_bindingRtbool(tpackageRR(tbinding((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytdoes_tree_importscCs|jtjtjfkS(s0Returns true if the node is an import statement.(RqRt import_nameRK(R(((s*/usr/lib64/python2.7/lib2to3/fixer_util.pyt is_import"sc Csd}t|}t|||r+dSd}}xnt|jD]]\}}||scqEnx1t|j|D]\}}||swPqwqwW||}PqEW|dkrxbt|jD]N\}}|jtjkr|jr|jdjtjkr|d}PqqWn|dkr\t tj t tj dt tj |ddg} n$t|t tj |ddg} | tg} |j|t tj| dS(s\ Works like `does_tree_import` but adds an import statement if it was not imported. cSs,|jtjko+|jo+t|jdS(Ni(RqRt simple_stmtROR(R(((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytis_import_stmt)sNiiuimportRu (RRR_RORqRRRR8R+RRRRRQR/R#( RRR(Rtroott insert_postoffsettidxtnode2timport_RO((s*/usr/lib64/python2.7/lib2to3/fixer_util.pyt touch_import&s4            !$cCsKxD|jD]9}d}|jtjkrst||jdrB|St|t|jd|}|r |}q n|jtjtj fkrt|t|jd|}|r |}q na|jtj kr|t|t|jd|}|r|}q xt |jdD]b\}}|jt j kr|jdkrt|t|j|d|}|ru|}quqqWn|jtkr|jdj|kr|}nvt|||r|}n[|jtjkrt|||}n4|jtjkr t||jdr |}q n|r |s0|St|rC|Sq q WdS( s Returns the node which binds variable name, otherwise None. If optional argument package is supplied, only imports will be returned. See test cases for examples.iiiit:iiN(ROR+RqRtfor_stmtt_findRR}tif_stmtt while_stmtttry_stmtR_RtCOLONR t _def_symst_is_import_bindingRRtR(RR(RtchildtretR2titkid((s*/usr/lib64/python2.7/lib2to3/fixer_util.pyRTsH    !#%     cCs||g}xl|rw|j}|jdkrO|jtkrO|j|jq |jtjkr |j|kr |Sq WdS(Ni( tpopRqt _block_symstextendRORRR R+(RR(tnodes((s*/usr/lib64/python2.7/lib2to3/fixer_util.pyRs   !cCs'|jtjkr| r|jd}|jtjkrx|jD]Z}|jtjkrw|jdj|kr|SqB|jtjkrB|j|krB|SqBWq#|jtjkr|jd}|jtjkr|j|kr|Sq#|jtjkr#|j|kr#|Sn|jtj kr#|rMt |jdj |krMdS|jd}|rst d|rsdS|jtjkrt ||r|S|jtjkr|jd}|jtjkr |j|kr |Sq#|jtjkr|j|kr|S|r#|jtjkr#|SndS(s Will reuturn node if node will import name, or node will import * from package. None is returned otherwise. See test cases for examples. iiiiuasN(RqRRROtdotted_as_namestdotted_as_nameR RRRKtunicodetstripR+RRJtimport_as_nametSTAR(R(RRRPRtlastR2((s*/usr/lib64/python2.7/lib2to3/fixer_util.pyRs@ !  !!% ! !!N(:t__doc__t itertoolsRtpgen2RtpytreeRRtpygramRRtRR R RRR+RRR RR)R-R/R0R3R7R:RHRQRTRURVRXtconsuming_callsRbRfRgRhRkRdRoRxR|R}RRRRRsRrRRRRRR(((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytsZ                       - * fixer_util.pyo000064400000034533147204472210007462 0ustar00 {fc @sdZddlmZddlmZddlmZmZddlm Z ddl m Z dZ d Zd Zd Zd5d Zd ZdZdZeedZd5d5dZdZdZd5dZdZd5dZd5dZdZdZdZ dZ!e"ddddd d!d"d#d$d%g Z#d&Z$d'a%d(a&d)a'e(a)d*Z*d+Z+d,Z,d-Z-d.Z.d/Z/d0Z0d1Z1e"e j2e j3gZ4d5d2Z5e"e j3e j2e j6gZ7d3Z8d5d4Z9d5S(6s1Utility functions, node construction macros, etc.i(tislicei(ttoken(tLeaftNode(tpython_symbols(tpatcompcCs%ttj|ttjd|gS(Nu=(RtsymstargumentRRtEQUAL(tkeywordtvalue((s*/usr/lib64/python2.7/lib2to3/fixer_util.pyt KeywordArgs cCsttjdS(Nu((RRtLPAR(((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytLParenscCsttjdS(Nu)(RRtRPAR(((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytRParenscCslt|ts|g}nt|ts?d|_|g}nttj|ttjdddg|S(sBuild an assignment statementu u=tprefix( t isinstancetlistRRRtatomRRR(ttargettsource((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytAssigns    cCsttj|d|S(sReturn a NAME leafR(RRtNAME(tnameR((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytName&scCs|ttjt|ggS(sA node tuple for obj.attr(RRttrailertDot(tobjtattr((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytAttr*scCsttjdS(s A comma leafu,(RRtCOMMA(((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytComma.scCsttjdS(sA period (.) 
leafu.(RRtDOT(((s*/usr/lib64/python2.7/lib2to3/fixer_util.pyR2scCsMttj|j|jg}|rI|jdttj|n|S(s-A parenthesised argument list, used by Call()i(RRRtclonet insert_childtarglist(targstlparentrparentnode((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytArgList6s$cCs:ttj|t|g}|dk r6||_n|S(sA function callN(RRtpowerR)tNoneR(t func_nameR%RR(((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytCall=s  cCsttjdS(sA newline literalu (RRtNEWLINE(((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytNewlineDscCsttjdS(s A blank lineu(RRR.(((s*/usr/lib64/python2.7/lib2to3/fixer_util.pyt BlankLineHscCsttj|d|S(NR(RRtNUMBER(tnR((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytNumberLscCs1ttjttjd|ttjdgS(sA numeric or string subscriptu[u](RRRRRtLBRACEtRBRACE(t index_node((s*/usr/lib64/python2.7/lib2to3/fixer_util.pyt SubscriptOscCsttj|d|S(s A string leafR(RRtSTRING(tstringR((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytStringUsc Csd|_d|_d|_ttjd}d|_ttjd}d|_||||g}|rd|_ttjd}d|_|jttj||gnttj|ttj |g}ttj ttj d|ttj dgS(suA list comprehension of the form [xp for fp in it if test]. If test is None, the "if test" part is omitted. uu uforuinuifu[u]( RRRRtappendRRtcomp_ift listmakertcomp_forRR4R5( txptfptitttesttfor_leaftin_leaft inner_argstif_leaftinner((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytListCompYs$       "$ cCsx|D]}|jqWttjdttj|ddttjdddttj|g}ttj|}|S(sO Return an import statement in the form: from package import name_leafsufromRu uimport(tremoveRRRRRtimport_as_namest import_from(t package_namet name_leafstleaftchildrentimp((s*/usr/lib64/python2.7/lib2to3/fixer_util.pyt FromImportqs cCst|tr.|jttgkr.tSt|tot|jdkot|jdtot|jdtot|jdto|jdjdko|jdjdkS(s(Does the node represent a tuple literal?iiiiu(u)( RRROR RtTruetlenRR (R(((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytis_tuples*cCszt|toyt|jdkoyt|jdtoyt|jdtoy|jdjdkoy|jdjdkS(s'Does the node represent a list literal?iiiu[u](RRRSRORR (R(((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytis_lists cCsttjt|tgS(N(RRRR R(R(((s*/usr/lib64/python2.7/lib2to3/fixer_util.pyt parenthesizestsortedRtsettanytallttupletsumtmintmaxt enumerateccs4t||}x|r/|Vt||}qWdS(slFollow an attribute chain. If you have a chain of objects where a.foo -> b, b.foo-> c, etc, use this to iterate over all objects in the chain. Iteration is terminated by getattr(x, attr) is None. Args: obj: the starting object attr: the name of the chaining attribute Yields: Each successive object in the chain. N(tgetattr(RRtnext((s*/usr/lib64/python2.7/lib2to3/fixer_util.pyt attr_chains sefor_stmt< 'for' any 'in' node=any ':' any* > | comp_for< 'for' any 'in' node=any any* > s power< ( 'iter' | 'list' | 'tuple' | 'sorted' | 'set' | 'sum' | 'any' | 'all' | 'enumerate' | (any* trailer< '.' 'join' >) ) trailer< '(' node=any ')' > any* > s` power< ( 'sorted' | 'enumerate' ) trailer< '(' arglist ')' > any* > cCsts<tjtatjtatjtatantttg}xRt|t|dD]8\}}i}|j ||rd|d|krdtSqdWt S(s Returns true if node is in an environment where all that is required of it is being iterable (ie, it doesn't matter if it returns a list or an iterator). See test_map_nochange in test_fixers.py for some examples and tests. tparentR(( t pats_builtRtcompile_patterntp0tp1tp2RRtzipRbtmatchtFalse(R(tpatternstpatternRctresults((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytin_special_contexts %"cCs|j}|dk r+|jtjkr+tS|j}|jtjtj fkrStS|jtj kr||j d|kr|tS|jtj ks|jtj kr|dk r|jtjks|j d|krtStS(sG Check that something isn't an attribute or function name etc. 
iN(t prev_siblingR+ttypeRR!RkRcRtfuncdeftclassdeft expr_stmtROt parameterst typedargslistRRR(R(tprevRc((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytis_probably_builtins  %cCspxi|dk rk|jtjkr_t|jdkr_|jd}|jtjkr_|jSn|j }qWdS(sFind the indentation of *node*.iiuN( R+RqRtsuiteRSRORtINDENTR Rc(R(tindent((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytfind_indentations'   cCsW|jtjkr|S|j}|jd}|_ttj|g}||_|S(N(RqRRyR"RcR+R(R(RcRy((s*/usr/lib64/python2.7/lib2to3/fixer_util.pyt make_suites  cCs;x4|jtjkr6|j}|stdqqW|S(sFind the top level namespace.s,root found before file_input node was found.(RqRt file_inputRct ValueError(R(((s*/usr/lib64/python2.7/lib2to3/fixer_util.pyt find_roots  cCs"t|t||}t|S(s Returns true if name is imported from package at the top level of the tree which node belongs to. To cover the case of an import like 'import foo', use None for the package and 'foo' for the name. (t find_bindingRtbool(tpackageRR(tbinding((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytdoes_tree_importscCs|jtjtjfkS(s0Returns true if the node is an import statement.(RqRt import_nameRK(R(((s*/usr/lib64/python2.7/lib2to3/fixer_util.pyt is_import"sc Csd}t|}t|||r+dSd}}xnt|jD]]\}}||scqEnx1t|j|D]\}}||swPqwqwW||}PqEW|dkrxbt|jD]N\}}|jtjkr|jr|jdjtjkr|d}PqqWn|dkr\t tj t tj dt tj |ddg} n$t|t tj |ddg} | tg} |j|t tj| dS(s\ Works like `does_tree_import` but adds an import statement if it was not imported. cSs,|jtjko+|jo+t|jdS(Ni(RqRt simple_stmtROR(R(((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytis_import_stmt)sNiiuimportRu (RRR_RORqRRRR8R+RRRRRQR/R#( RRR(Rtroott insert_postoffsettidxtnode2timport_RO((s*/usr/lib64/python2.7/lib2to3/fixer_util.pyt touch_import&s4            !$cCsKxD|jD]9}d}|jtjkrst||jdrB|St|t|jd|}|r |}q n|jtjtj fkrt|t|jd|}|r |}q na|jtj kr|t|t|jd|}|r|}q xt |jdD]b\}}|jt j kr|jdkrt|t|j|d|}|ru|}quqqWn|jtkr|jdj|kr|}nvt|||r|}n[|jtjkrt|||}n4|jtjkr t||jdr |}q n|r |s0|St|rC|Sq q WdS( s Returns the node which binds variable name, otherwise None. If optional argument package is supplied, only imports will be returned. See test cases for examples.iiiit:iiN(ROR+RqRtfor_stmtt_findRR}tif_stmtt while_stmtttry_stmtR_RtCOLONR t _def_symst_is_import_bindingRRtR(RR(RtchildtretR2titkid((s*/usr/lib64/python2.7/lib2to3/fixer_util.pyRTsH    !#%     cCs||g}xl|rw|j}|jdkrO|jtkrO|j|jq |jtjkr |j|kr |Sq WdS(Ni( tpopRqt _block_symstextendRORRR R+(RR(tnodes((s*/usr/lib64/python2.7/lib2to3/fixer_util.pyRs   !cCs'|jtjkr| r|jd}|jtjkrx|jD]Z}|jtjkrw|jdj|kr|SqB|jtjkrB|j|krB|SqBWq#|jtjkr|jd}|jtjkr|j|kr|Sq#|jtjkr#|j|kr#|Sn|jtj kr#|rMt |jdj |krMdS|jd}|rst d|rsdS|jtjkrt ||r|S|jtjkr|jd}|jtjkr |j|kr |Sq#|jtjkr|j|kr|S|r#|jtjkr#|SndS(s Will reuturn node if node will import name, or node will import * from package. None is returned otherwise. See test cases for examples. iiiiuasN(RqRRROtdotted_as_namestdotted_as_nameR RRRKtunicodetstripR+RRJtimport_as_nametSTAR(R(RRRPRtlastR2((s*/usr/lib64/python2.7/lib2to3/fixer_util.pyRs@ !  !!% ! !!N(:t__doc__t itertoolsRtpgen2RtpytreeRRtpygramRRtRR R RRR+RRR RR)R-R/R0R3R7R:RHRQRTRURVRXtconsuming_callsRbRfRgRhRkRdRoRxR|R}RRRRRsRrRRRRRR(((s*/usr/lib64/python2.7/lib2to3/fixer_util.pytsZ                       - * main.py000064400000026525147204472210006057 0ustar00""" Main program for 2to3. """ from __future__ import with_statement import sys import os import difflib import logging import shutil import optparse from . 
import refactor def diff_texts(a, b, filename): """Return a unified diff of two strings.""" a = a.splitlines() b = b.splitlines() return difflib.unified_diff(a, b, filename, filename, "(original)", "(refactored)", lineterm="") class StdoutRefactoringTool(refactor.MultiprocessRefactoringTool): """ A refactoring tool that can avoid overwriting its input files. Prints output to stdout. Output files can optionally be written to a different directory and or have an extra file suffix appended to their name for use in situations where you do not want to replace the input files. """ def __init__(self, fixers, options, explicit, nobackups, show_diffs, input_base_dir='', output_dir='', append_suffix=''): """ Args: fixers: A list of fixers to import. options: A dict with RefactoringTool configuration. explicit: A list of fixers to run even if they are explicit. nobackups: If true no backup '.bak' files will be created for those files that are being refactored. show_diffs: Should diffs of the refactoring be printed to stdout? input_base_dir: The base directory for all input files. This class will strip this path prefix off of filenames before substituting it with output_dir. Only meaningful if output_dir is supplied. All files processed by refactor() must start with this path. output_dir: If supplied, all converted files will be written into this directory tree instead of input_base_dir. append_suffix: If supplied, all files output by this tool will have this appended to their filename. Useful for changing .py to .py3 for example by passing append_suffix='3'. """ self.nobackups = nobackups self.show_diffs = show_diffs if input_base_dir and not input_base_dir.endswith(os.sep): input_base_dir += os.sep self._input_base_dir = input_base_dir self._output_dir = output_dir self._append_suffix = append_suffix super(StdoutRefactoringTool, self).__init__(fixers, options, explicit) def log_error(self, msg, *args, **kwargs): self.errors.append((msg, args, kwargs)) self.logger.error(msg, *args, **kwargs) def write_file(self, new_text, filename, old_text, encoding): orig_filename = filename if self._output_dir: if filename.startswith(self._input_base_dir): filename = os.path.join(self._output_dir, filename[len(self._input_base_dir):]) else: raise ValueError('filename %s does not start with the ' 'input_base_dir %s' % ( filename, self._input_base_dir)) if self._append_suffix: filename += self._append_suffix if orig_filename != filename: output_dir = os.path.dirname(filename) if not os.path.isdir(output_dir): os.makedirs(output_dir) self.log_message('Writing converted %s to %s.', orig_filename, filename) if not self.nobackups: # Make backup backup = filename + ".bak" if os.path.lexists(backup): try: os.remove(backup) except os.error, err: self.log_message("Can't remove backup %s", backup) try: os.rename(filename, backup) except os.error, err: self.log_message("Can't rename %s to %s", filename, backup) # Actually write the new file write = super(StdoutRefactoringTool, self).write_file write(new_text, filename, old_text, encoding) if not self.nobackups: shutil.copymode(backup, filename) if orig_filename != filename: # Preserve the file mode in the new output directory. 
shutil.copymode(orig_filename, filename) def print_output(self, old, new, filename, equal): if equal: self.log_message("No changes to %s", filename) else: self.log_message("Refactored %s", filename) if self.show_diffs: diff_lines = diff_texts(old, new, filename) try: if self.output_lock is not None: with self.output_lock: for line in diff_lines: print line sys.stdout.flush() else: for line in diff_lines: print line except UnicodeEncodeError: warn("couldn't encode %s's diff for your terminal" % (filename,)) return def warn(msg): print >> sys.stderr, "WARNING: %s" % (msg,) def main(fixer_pkg, args=None): """Main program. Args: fixer_pkg: the name of a package where the fixers are located. args: optional; a list of command line arguments. If omitted, sys.argv[1:] is used. Returns a suggested exit status (0, 1, 2). """ # Set up option parser parser = optparse.OptionParser(usage="2to3 [options] file|dir ...") parser.add_option("-d", "--doctests_only", action="store_true", help="Fix up doctests only") parser.add_option("-f", "--fix", action="append", default=[], help="Each FIX specifies a transformation; default: all") parser.add_option("-j", "--processes", action="store", default=1, type="int", help="Run 2to3 concurrently") parser.add_option("-x", "--nofix", action="append", default=[], help="Prevent a transformation from being run") parser.add_option("-l", "--list-fixes", action="store_true", help="List available transformations") parser.add_option("-p", "--print-function", action="store_true", help="Modify the grammar so that print() is a function") parser.add_option("-v", "--verbose", action="store_true", help="More verbose logging") parser.add_option("--no-diffs", action="store_true", help="Don't show diffs of the refactoring") parser.add_option("-w", "--write", action="store_true", help="Write back modified files") parser.add_option("-n", "--nobackups", action="store_true", default=False, help="Don't write backups for modified files") parser.add_option("-o", "--output-dir", action="store", type="str", default="", help="Put output files in this directory " "instead of overwriting the input files. Requires -n.") parser.add_option("-W", "--write-unchanged-files", action="store_true", help="Also write files even if no changes were required" " (useful with --output-dir); implies -w.") parser.add_option("--add-suffix", action="store", type="str", default="", help="Append this string to all output filenames." " Requires -n if non-empty. " "ex: --add-suffix='3' will generate .py3 files.") # Parse command line arguments refactor_stdin = False flags = {} options, args = parser.parse_args(args) if options.write_unchanged_files: flags["write_unchanged_files"] = True if not options.write: warn("--write-unchanged-files/-W implies -w.") options.write = True # If we allowed these, the original files would be renamed to backup names # but not replaced. if options.output_dir and not options.nobackups: parser.error("Can't use --output-dir/-o without -n.") if options.add_suffix and not options.nobackups: parser.error("Can't use --add-suffix without -n.") if not options.write and options.no_diffs: warn("not writing files and not printing diffs; that's not very useful") if not options.write and options.nobackups: parser.error("Can't use -n without -w") if options.list_fixes: print "Available transformations for the -f/--fix option:" for fixname in refactor.get_all_fix_names(fixer_pkg): print fixname if not args: return 0 if not args: print >> sys.stderr, "At least one file or directory argument required." 
print >> sys.stderr, "Use --help to show usage." return 2 if "-" in args: refactor_stdin = True if options.write: print >> sys.stderr, "Can't write to stdin." return 2 if options.print_function: flags["print_function"] = True # Set up logging handler level = logging.DEBUG if options.verbose else logging.INFO logging.basicConfig(format='%(name)s: %(message)s', level=level) logger = logging.getLogger('lib2to3.main') # Initialize the refactoring tool avail_fixes = set(refactor.get_fixers_from_package(fixer_pkg)) unwanted_fixes = set(fixer_pkg + ".fix_" + fix for fix in options.nofix) explicit = set() if options.fix: all_present = False for fix in options.fix: if fix == "all": all_present = True else: explicit.add(fixer_pkg + ".fix_" + fix) requested = avail_fixes.union(explicit) if all_present else explicit else: requested = avail_fixes.union(explicit) fixer_names = requested.difference(unwanted_fixes) input_base_dir = os.path.commonprefix(args) if (input_base_dir and not input_base_dir.endswith(os.sep) and not os.path.isdir(input_base_dir)): # One or more similar names were passed, their directory is the base. # os.path.commonprefix() is ignorant of path elements, this corrects # for that weird API. input_base_dir = os.path.dirname(input_base_dir) if options.output_dir: input_base_dir = input_base_dir.rstrip(os.sep) logger.info('Output in %r will mirror the input directory %r layout.', options.output_dir, input_base_dir) rt = StdoutRefactoringTool( sorted(fixer_names), flags, sorted(explicit), options.nobackups, not options.no_diffs, input_base_dir=input_base_dir, output_dir=options.output_dir, append_suffix=options.add_suffix) # Refactor all files and directories passed as arguments if not rt.errors: if refactor_stdin: rt.refactor_stdin() else: try: rt.refactor(args, options.write, options.doctests_only, options.processes) except refactor.MultiprocessingUnsupported: assert options.processes > 1 print >> sys.stderr, "Sorry, -j isn't " \ "supported on this platform." return 1 rt.summarize() # Return error status (0 if rt.errors is zero) return int(bool(rt.errors)) main.pyc000064400000023147147204472210006217 0ustar00 {fc@sdZddlmZddlZddlZddlZddlZddlZddlZddl m Z dZ de j fdYZ d Zdd ZdS( s Main program for 2to3. i(twith_statementNi(trefactorc Cs:|j}|j}tj||||ddddS(s%Return a unified diff of two strings.s (original)s (refactored)tlinetermt(t splitlinestdifflibt unified_diff(tatbtfilename((s$/usr/lib64/python2.7/lib2to3/main.pyt diff_textss    tStdoutRefactoringToolcBs;eZdZddddZdZdZdZRS(s2 A refactoring tool that can avoid overwriting its input files. Prints output to stdout. Output files can optionally be written to a different directory and or have an extra file suffix appended to their name for use in situations where you do not want to replace the input files. Rc Csv||_||_|r;|jtj r;|tj7}n||_||_||_tt |j |||dS(sF Args: fixers: A list of fixers to import. options: A dict with RefactoringTool configuration. explicit: A list of fixers to run even if they are explicit. nobackups: If true no backup '.bak' files will be created for those files that are being refactored. show_diffs: Should diffs of the refactoring be printed to stdout? input_base_dir: The base directory for all input files. This class will strip this path prefix off of filenames before substituting it with output_dir. Only meaningful if output_dir is supplied. All files processed by refactor() must start with this path. output_dir: If supplied, all converted files will be written into this directory tree instead of input_base_dir. 
append_suffix: If supplied, all files output by this tool will have this appended to their filename. Useful for changing .py to .py3 for example by passing append_suffix='3'. N( t nobackupst show_diffstendswithtostsept_input_base_dirt _output_dirt_append_suffixtsuperR t__init__( tselftfixerstoptionstexplicitR R tinput_base_dirt output_dirt append_suffix((s$/usr/lib64/python2.7/lib2to3/main.pyR$s     cOs3|jj|||f|jj|||dS(N(terrorstappendtloggerterror(Rtmsgtargstkwargs((s$/usr/lib64/python2.7/lib2to3/main.pyt log_errorAsc Cs|}|jre|j|jrItjj|j|t|j}qetd||jfn|jr~||j7}n||krtjj |}tjj |stj |n|j d||n|j sy|d}tjj|r6ytj|Wq6tjk r2}|j d|q6Xnytj||Wqytjk ru}|j d||qyXntt|j} | |||||j stj||n||krtj||ndS(Ns5filename %s does not start with the input_base_dir %ssWriting converted %s to %s.s.baksCan't remove backup %ssCan't rename %s to %s(Rt startswithRRtpathtjointlent ValueErrorRtdirnametisdirtmakedirst log_messageR tlexiststremoveR trenameRR t write_filetshutiltcopymode( Rtnew_textR told_texttencodingt orig_filenameRtbackupterrtwrite((s$/usr/lib64/python2.7/lib2to3/main.pyR1Es@         cCs|r|jd|n|jd||jrt|||}y_|jdk r|j(x|D] }|GHqgWtjjWdQXnx|D] }|GHqWWqtk rt d|fdSXndS(NsNo changes to %ss Refactored %ss+couldn't encode %s's diff for your terminal( R-R R t output_locktNonetsyststdouttflushtUnicodeEncodeErrortwarn(RtoldtnewR tequalt diff_linestline((s$/usr/lib64/python2.7/lib2to3/main.pyt print_outputls"        (t__name__t __module__t__doc__RR$R1RG(((s$/usr/lib64/python2.7/lib2to3/main.pyR s   'cCstjd|fIJdS(Ns WARNING: %s(R=tstderr(R!((s$/usr/lib64/python2.7/lib2to3/main.pyRAsc stjdd}|jdddddd|jd d dd d gdd |jddddd ddddd|jdddd d gdd|jdddddd|jdddddd|jdddddd |jd!dddd"|jd#d$dddd%|jd&d'ddd tdd(|jd)d*dddd+d d,dd-|jd.d/dddd0|jd1dddd+d d,dd2t}i}|j|\}}|jrt|d3<|jstd4nt|_n|j r'|j r'|j d5n|j rJ|j rJ|j d6n|j rj|j rjtd7n|j r|j r|j d8n|jrd9GHxtjD] }|GHqW|sd:Sn|stjd;IJtjd<IJd=Sd>|krt}|jrtjd?IJd=Sn|jr0t|d@stalls.fix_s7Output in %r will mirror the input directory %r layout.RRRs+Sorry, -j isn't supported on this platform.(5toptparset OptionParsert add_optiontFalset parse_argsRUtTrueR:RARR R t add_suffixtno_diffst list_fixesRtget_all_fix_namesR=RKRWtverbosetloggingtDEBUGtINFOt basicConfigt getLoggertsettget_fixers_from_packagetnofixR[taddtuniont differenceRR&t commonprefixRRR+R*trstriptinfoR tsortedRtrefactor_stdint doctests_onlyt processestMultiprocessingUnsupportedtAssertionErrort summarizeRStbool(R\R"tparserRxtflagsRtfixnameRYRt avail_fixestunwanted_fixesRt all_presentR[t requestedt fixer_namesRtrt((R\s$/usr/lib64/python2.7/lib2to3/main.pytmains                              (RJt __future__RR=RRRiR2R^RRR tMultiprocessRefactoringToolR RAR<R(((s$/usr/lib64/python2.7/lib2to3/main.pyts       h main.pyo000064400000023075147204472210006233 0ustar00 {fc@sdZddlmZddlZddlZddlZddlZddlZddlZddl m Z dZ de j fdYZ d Zdd ZdS( s Main program for 2to3. i(twith_statementNi(trefactorc Cs:|j}|j}tj||||ddddS(s%Return a unified diff of two strings.s (original)s (refactored)tlinetermt(t splitlinestdifflibt unified_diff(tatbtfilename((s$/usr/lib64/python2.7/lib2to3/main.pyt diff_textss    tStdoutRefactoringToolcBs;eZdZddddZdZdZdZRS(s2 A refactoring tool that can avoid overwriting its input files. Prints output to stdout. Output files can optionally be written to a different directory and or have an extra file suffix appended to their name for use in situations where you do not want to replace the input files. Rc Csv||_||_|r;|jtj r;|tj7}n||_||_||_tt |j |||dS(sF Args: fixers: A list of fixers to import. options: A dict with RefactoringTool configuration. 
patcomp.py000064400000015631147204472210006572 0ustar00# Copyright 2006 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Pattern compiler. The grammar is taken from PatternGrammar.txt. 
The compiler compiles a pattern to a pytree.*Pattern instance. """ __author__ = "Guido van Rossum " # Python imports import StringIO # Fairly local imports from .pgen2 import driver, literals, token, tokenize, parse, grammar # Really local imports from . import pytree from . import pygram class PatternSyntaxError(Exception): pass def tokenize_wrapper(input): """Tokenizes a string suppressing significant whitespace.""" skip = set((token.NEWLINE, token.INDENT, token.DEDENT)) tokens = tokenize.generate_tokens(StringIO.StringIO(input).readline) for quintuple in tokens: type, value, start, end, line_text = quintuple if type not in skip: yield quintuple class PatternCompiler(object): def __init__(self, grammar_file=None): """Initializer. Takes an optional alternative filename for the pattern grammar. """ if grammar_file is None: self.grammar = pygram.pattern_grammar self.syms = pygram.pattern_symbols else: self.grammar = driver.load_grammar(grammar_file) self.syms = pygram.Symbols(self.grammar) self.pygrammar = pygram.python_grammar self.pysyms = pygram.python_symbols self.driver = driver.Driver(self.grammar, convert=pattern_convert) def compile_pattern(self, input, debug=False, with_tree=False): """Compiles a pattern string to a nested pytree.*Pattern object.""" tokens = tokenize_wrapper(input) try: root = self.driver.parse_tokens(tokens, debug=debug) except parse.ParseError as e: raise PatternSyntaxError(str(e)) if with_tree: return self.compile_node(root), root else: return self.compile_node(root) def compile_node(self, node): """Compiles a node, recursively. This is one big switch on the node type. """ # XXX Optimize certain Wildcard-containing-Wildcard patterns # that can be merged if node.type == self.syms.Matcher: node = node.children[0] # Avoid unneeded recursion if node.type == self.syms.Alternatives: # Skip the odd children since they are just '|' tokens alts = [self.compile_node(ch) for ch in node.children[::2]] if len(alts) == 1: return alts[0] p = pytree.WildcardPattern([[a] for a in alts], min=1, max=1) return p.optimize() if node.type == self.syms.Alternative: units = [self.compile_node(ch) for ch in node.children] if len(units) == 1: return units[0] p = pytree.WildcardPattern([units], min=1, max=1) return p.optimize() if node.type == self.syms.NegatedUnit: pattern = self.compile_basic(node.children[1:]) p = pytree.NegatedPattern(pattern) return p.optimize() assert node.type == self.syms.Unit name = None nodes = node.children if len(nodes) >= 3 and nodes[1].type == token.EQUAL: name = nodes[0].value nodes = nodes[2:] repeat = None if len(nodes) >= 2 and nodes[-1].type == self.syms.Repeater: repeat = nodes[-1] nodes = nodes[:-1] # Now we've reduced it to: STRING | NAME [Details] | (...) | [...] pattern = self.compile_basic(nodes, repeat) if repeat is not None: assert repeat.type == self.syms.Repeater children = repeat.children child = children[0] if child.type == token.STAR: min = 0 max = pytree.HUGE elif child.type == token.PLUS: min = 1 max = pytree.HUGE elif child.type == token.LBRACE: assert children[-1].type == token.RBRACE assert len(children) in (3, 5) min = max = self.get_int(children[1]) if len(children) == 5: max = self.get_int(children[3]) else: assert False if min != 1 or max != 1: pattern = pattern.optimize() pattern = pytree.WildcardPattern([[pattern]], min=min, max=max) if name is not None: pattern.name = name return pattern.optimize() def compile_basic(self, nodes, repeat=None): # Compile STRING | NAME [Details] | (...) | [...] 
assert len(nodes) >= 1 node = nodes[0] if node.type == token.STRING: value = unicode(literals.evalString(node.value)) return pytree.LeafPattern(_type_of_literal(value), value) elif node.type == token.NAME: value = node.value if value.isupper(): if value not in TOKEN_MAP: raise PatternSyntaxError("Invalid token: %r" % value) if nodes[1:]: raise PatternSyntaxError("Can't have details for token") return pytree.LeafPattern(TOKEN_MAP[value]) else: if value == "any": type = None elif not value.startswith("_"): type = getattr(self.pysyms, value, None) if type is None: raise PatternSyntaxError("Invalid symbol: %r" % value) if nodes[1:]: # Details present content = [self.compile_node(nodes[1].children[1])] else: content = None return pytree.NodePattern(type, content) elif node.value == "(": return self.compile_node(nodes[1]) elif node.value == "[": assert repeat is None subpattern = self.compile_node(nodes[1]) return pytree.WildcardPattern([[subpattern]], min=0, max=1) assert False, node def get_int(self, node): assert node.type == token.NUMBER return int(node.value) # Map named tokens to the type value for a LeafPattern TOKEN_MAP = {"NAME": token.NAME, "STRING": token.STRING, "NUMBER": token.NUMBER, "TOKEN": None} def _type_of_literal(value): if value[0].isalpha(): return token.NAME elif value in grammar.opmap: return grammar.opmap[value] else: return None def pattern_convert(grammar, raw_node_info): """Converts raw node information to a Node or Leaf instance.""" type, value, context, children = raw_node_info if children or type in grammar.number2symbol: return pytree.Node(type, children, context=context) else: return pytree.Leaf(type, value, context=context) def compile_pattern(pattern): return PatternCompiler().compile_pattern(pattern)
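# --- Illustrative sketch, not part of the upstream module ---
# Compiling a pattern string and matching it against a parse tree, the way
# 2to3 fixers use these helpers. The pattern binds the matched leaf under
# the key "name"; the one-line source being parsed is a made-up example.
#
#   from lib2to3 import pygram, pytree
#   from lib2to3.pgen2 import driver
#
#   pat = compile_pattern("power< name='apply' trailer< '(' any* ')' > >")
#   d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
#   tree = d.parse_string("apply(f, args)\n")
#   for node in tree.pre_order():
#       results = {}
#       if pat.match(node, results):
#           print results["name"]   # -> the 'apply' NAME leaf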
pygram.py000064400000002206147204472210006420 0ustar00# Copyright 2006 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """Export the Python grammar and symbols.""" # Python imports import os # Local imports from .pgen2 import token from .pgen2 import driver from . import pytree # The grammar file _GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "Grammar.txt") _PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "PatternGrammar.txt") class Symbols(object): def __init__(self, grammar): """Initializer. Creates an attribute for each grammar symbol (nonterminal), whose value is the symbol's type (an int >= 256). 
""" for name, symbol in grammar.symbol2number.iteritems(): setattr(self, name, symbol) python_grammar = driver.load_packaged_grammar("lib2to3", _GRAMMAR_FILE) python_symbols = Symbols(python_grammar) python_grammar_no_print_statement = python_grammar.copy() del python_grammar_no_print_statement.keywords["print"] pattern_grammar = driver.load_packaged_grammar("lib2to3", _PATTERN_GRAMMAR_FILE) pattern_symbols = Symbols(pattern_grammar) pygram.pyc000064400000002641147204472210006566 0ustar00 {fc@sdZddlZddlmZddlmZddlmZejjejj e dZ ejjejj e dZ d e fd YZejd e ZeeZejZejd =ejd e ZeeZdS( s&Export the Python grammar and symbols.iNi(ttoken(tdriver(tpytrees Grammar.txtsPatternGrammar.txttSymbolscBseZdZRS(cCs4x-|jjD]\}}t|||qWdS(sInitializer. Creates an attribute for each grammar symbol (nonterminal), whose value is the symbol's type (an int >= 256). N(t symbol2numbert iteritemstsetattr(tselftgrammartnametsymbol((s&/usr/lib64/python2.7/lib2to3/pygram.pyt__init__s(t__name__t __module__R (((s&/usr/lib64/python2.7/lib2to3/pygram.pyRstlib2to3tprint(t__doc__tostpgen2RRtRtpathtjointdirnamet__file__t _GRAMMAR_FILEt_PATTERN_GRAMMAR_FILEtobjectRtload_packaged_grammartpython_grammartpython_symbolstcopyt!python_grammar_no_print_statementtkeywordstpattern_grammartpattern_symbols(((s&/usr/lib64/python2.7/lib2to3/pygram.pyts !     pygram.pyo000064400000002641147204472210006602 0ustar00 {fc@sdZddlZddlmZddlmZddlmZejjejj e dZ ejjejj e dZ d e fd YZejd e ZeeZejZejd =ejd e ZeeZdS( s&Export the Python grammar and symbols.iNi(ttoken(tdriver(tpytrees Grammar.txtsPatternGrammar.txttSymbolscBseZdZRS(cCs4x-|jjD]\}}t|||qWdS(sInitializer. Creates an attribute for each grammar symbol (nonterminal), whose value is the symbol's type (an int >= 256). N(t symbol2numbert iteritemstsetattr(tselftgrammartnametsymbol((s&/usr/lib64/python2.7/lib2to3/pygram.pyt__init__s(t__name__t __module__R (((s&/usr/lib64/python2.7/lib2to3/pygram.pyRstlib2to3tprint(t__doc__tostpgen2RRtRtpathtjointdirnamet__file__t _GRAMMAR_FILEt_PATTERN_GRAMMAR_FILEtobjectRtload_packaged_grammartpython_grammartpython_symbolstcopyt!python_grammar_no_print_statementtkeywordstpattern_grammartpattern_symbols(((s&/usr/lib64/python2.7/lib2to3/pygram.pyts !     pytree.py000064400000070557147204472210006447 0ustar00# Copyright 2006 Google, Inc. All Rights Reserved. # Licensed to PSF under a Contributor Agreement. """ Python parse tree definitions. This is a very concrete parse tree; we need to keep every token and even the comments and whitespace between tokens. There's also a pattern matching implementation here. """ __author__ = "Guido van Rossum " import sys import warnings from StringIO import StringIO HUGE = 0x7FFFFFFF # maximum repeat count, default max _type_reprs = {} def type_repr(type_num): global _type_reprs if not _type_reprs: from .pygram import python_symbols # printing tokens is possible but not as useful # from .pgen2 import token // token.__dict__.items(): for name, val in python_symbols.__dict__.items(): if type(val) == int: _type_reprs[val] = name return _type_reprs.setdefault(type_num, type_num) class Base(object): """ Abstract base class for Node and Leaf. This provides some default functionality and boilerplate using the template pattern. A node may be a subnode of at most one parent. 
""" # Default values for instance variables type = None # int: token number (< 256) or symbol number (>= 256) parent = None # Parent node pointer, or None children = () # Tuple of subnodes was_changed = False was_checked = False def __new__(cls, *args, **kwds): """Constructor that prevents Base from being instantiated.""" assert cls is not Base, "Cannot instantiate Base" return object.__new__(cls) def __eq__(self, other): """ Compare two nodes for equality. This calls the method _eq(). """ if self.__class__ is not other.__class__: return NotImplemented return self._eq(other) __hash__ = None # For Py3 compatibility. def __ne__(self, other): """ Compare two nodes for inequality. This calls the method _eq(). """ if self.__class__ is not other.__class__: return NotImplemented return not self._eq(other) def _eq(self, other): """ Compare two nodes for equality. This is called by __eq__ and __ne__. It is only called if the two nodes have the same type. This must be implemented by the concrete subclass. Nodes should be considered equal if they have the same structure, ignoring the prefix string and other context information. """ raise NotImplementedError def clone(self): """ Return a cloned (deep) copy of self. This must be implemented by the concrete subclass. """ raise NotImplementedError def post_order(self): """ Return a post-order iterator for the tree. This must be implemented by the concrete subclass. """ raise NotImplementedError def pre_order(self): """ Return a pre-order iterator for the tree. This must be implemented by the concrete subclass. """ raise NotImplementedError def set_prefix(self, prefix): """ Set the prefix for the node (see Leaf class). DEPRECATED; use the prefix property directly. """ warnings.warn("set_prefix() is deprecated; use the prefix property", DeprecationWarning, stacklevel=2) self.prefix = prefix def get_prefix(self): """ Return the prefix for the node (see Leaf class). DEPRECATED; use the prefix property directly. """ warnings.warn("get_prefix() is deprecated; use the prefix property", DeprecationWarning, stacklevel=2) return self.prefix def replace(self, new): """Replace this node with a new one in the parent.""" assert self.parent is not None, str(self) assert new is not None if not isinstance(new, list): new = [new] l_children = [] found = False for ch in self.parent.children: if ch is self: assert not found, (self.parent.children, self, new) if new is not None: l_children.extend(new) found = True else: l_children.append(ch) assert found, (self.children, self, new) self.parent.changed() self.parent.children = l_children for x in new: x.parent = self.parent self.parent = None def get_lineno(self): """Return the line number which generated the invocant node.""" node = self while not isinstance(node, Leaf): if not node.children: return node = node.children[0] return node.lineno def changed(self): if self.parent: self.parent.changed() self.was_changed = True def remove(self): """ Remove the node from the tree. Returns the position of the node in its parent's children before it was removed. """ if self.parent: for i, node in enumerate(self.parent.children): if node is self: self.parent.changed() del self.parent.children[i] self.parent = None return i @property def next_sibling(self): """ The node immediately following the invocant in their parent's children list. 
If the invocant does not have a next sibling, it is None """ if self.parent is None: return None # Can't use index(); we need to test by identity for i, child in enumerate(self.parent.children): if child is self: try: return self.parent.children[i+1] except IndexError: return None @property def prev_sibling(self): """ The node immediately preceding the invocant in their parent's children list. If the invocant does not have a previous sibling, it is None. """ if self.parent is None: return None # Can't use index(); we need to test by identity for i, child in enumerate(self.parent.children): if child is self: if i == 0: return None return self.parent.children[i-1] def leaves(self): for child in self.children: for x in child.leaves(): yield x def depth(self): if self.parent is None: return 0 return 1 + self.parent.depth() def get_suffix(self): """ Return the string immediately following the invocant node. This is effectively equivalent to node.next_sibling.prefix """ next_sib = self.next_sibling if next_sib is None: return u"" return next_sib.prefix if sys.version_info < (3, 0): def __str__(self): return unicode(self).encode("ascii") class Node(Base): """Concrete implementation for interior nodes.""" def __init__(self,type, children, context=None, prefix=None, fixers_applied=None): """ Initializer. Takes a type constant (a symbol number >= 256), a sequence of child nodes, and an optional context keyword argument. As a side effect, the parent pointers of the children are updated. """ assert type >= 256, type self.type = type self.children = list(children) for ch in self.children: assert ch.parent is None, repr(ch) ch.parent = self if prefix is not None: self.prefix = prefix if fixers_applied: self.fixers_applied = fixers_applied[:] else: self.fixers_applied = None def __repr__(self): """Return a canonical string representation.""" return "%s(%s, %r)" % (self.__class__.__name__, type_repr(self.type), self.children) def __unicode__(self): """ Return a pretty string representation. This reproduces the input source exactly. """ return u"".join(map(unicode, self.children)) if sys.version_info > (3, 0): __str__ = __unicode__ def _eq(self, other): """Compare two nodes for equality.""" return (self.type, self.children) == (other.type, other.children) def clone(self): """Return a cloned (deep) copy of self.""" return Node(self.type, [ch.clone() for ch in self.children], fixers_applied=self.fixers_applied) def post_order(self): """Return a post-order iterator for the tree.""" for child in self.children: for node in child.post_order(): yield node yield self def pre_order(self): """Return a pre-order iterator for the tree.""" yield self for child in self.children: for node in child.pre_order(): yield node def _prefix_getter(self): """ The whitespace and comments preceding this node in the input. """ if not self.children: return "" return self.children[0].prefix def _prefix_setter(self, prefix): if self.children: self.children[0].prefix = prefix prefix = property(_prefix_getter, _prefix_setter) def set_child(self, i, child): """ Equivalent to 'node.children[i] = child'. This method also sets the child's parent attribute appropriately. """ child.parent = self self.children[i].parent = None self.children[i] = child self.changed() def insert_child(self, i, child): """ Equivalent to 'node.children.insert(i, child)'. This method also sets the child's parent attribute appropriately. 
""" child.parent = self self.children.insert(i, child) self.changed() def append_child(self, child): """ Equivalent to 'node.children.append(child)'. This method also sets the child's parent attribute appropriately. """ child.parent = self self.children.append(child) self.changed() class Leaf(Base): """Concrete implementation for leaf nodes.""" # Default values for instance variables _prefix = "" # Whitespace and comments preceding this token in the input lineno = 0 # Line where this token starts in the input column = 0 # Column where this token tarts in the input def __init__(self, type, value, context=None, prefix=None, fixers_applied=[]): """ Initializer. Takes a type constant (a token number < 256), a string value, and an optional context keyword argument. """ assert 0 <= type < 256, type if context is not None: self._prefix, (self.lineno, self.column) = context self.type = type self.value = value if prefix is not None: self._prefix = prefix self.fixers_applied = fixers_applied[:] def __repr__(self): """Return a canonical string representation.""" return "%s(%r, %r)" % (self.__class__.__name__, self.type, self.value) def __unicode__(self): """ Return a pretty string representation. This reproduces the input source exactly. """ return self.prefix + unicode(self.value) if sys.version_info > (3, 0): __str__ = __unicode__ def _eq(self, other): """Compare two nodes for equality.""" return (self.type, self.value) == (other.type, other.value) def clone(self): """Return a cloned (deep) copy of self.""" return Leaf(self.type, self.value, (self.prefix, (self.lineno, self.column)), fixers_applied=self.fixers_applied) def leaves(self): yield self def post_order(self): """Return a post-order iterator for the tree.""" yield self def pre_order(self): """Return a pre-order iterator for the tree.""" yield self def _prefix_getter(self): """ The whitespace and comments preceding this token in the input. """ return self._prefix def _prefix_setter(self, prefix): self.changed() self._prefix = prefix prefix = property(_prefix_getter, _prefix_setter) def convert(gr, raw_node): """ Convert raw node information to a Node or Leaf instance. This is passed to the parser driver which calls it whenever a reduction of a grammar rule produces a new complete node, so that the tree is build strictly bottom-up. """ type, value, context, children = raw_node if children or type in gr.number2symbol: # If there's exactly one child, return that child instead of # creating a new node. if len(children) == 1: return children[0] return Node(type, children, context=context) else: return Leaf(type, value, context=context) class BasePattern(object): """ A pattern is a tree matching pattern. It looks for a specific node type (token or symbol), and optionally for a specific content. This is an abstract base class. There are three concrete subclasses: - LeafPattern matches a single leaf node; - NodePattern matches a single node (usually non-leaf); - WildcardPattern matches a sequence of nodes of variable length. 
""" # Defaults for instance variables type = None # Node type (token if < 256, symbol if >= 256) content = None # Optional content matching pattern name = None # Optional name used to store match in results dict def __new__(cls, *args, **kwds): """Constructor that prevents BasePattern from being instantiated.""" assert cls is not BasePattern, "Cannot instantiate BasePattern" return object.__new__(cls) def __repr__(self): args = [type_repr(self.type), self.content, self.name] while args and args[-1] is None: del args[-1] return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, args))) def optimize(self): """ A subclass can define this as a hook for optimizations. Returns either self or another node with the same effect. """ return self def match(self, node, results=None): """ Does this pattern exactly match a node? Returns True if it matches, False if not. If results is not None, it must be a dict which will be updated with the nodes matching named subpatterns. Default implementation for non-wildcard patterns. """ if self.type is not None and node.type != self.type: return False if self.content is not None: r = None if results is not None: r = {} if not self._submatch(node, r): return False if r: results.update(r) if results is not None and self.name: results[self.name] = node return True def match_seq(self, nodes, results=None): """ Does this pattern exactly match a sequence of nodes? Default implementation for non-wildcard patterns. """ if len(nodes) != 1: return False return self.match(nodes[0], results) def generate_matches(self, nodes): """ Generator yielding all matches for this pattern. Default implementation for non-wildcard patterns. """ r = {} if nodes and self.match(nodes[0], r): yield 1, r class LeafPattern(BasePattern): def __init__(self, type=None, content=None, name=None): """ Initializer. Takes optional type, content, and name. The type, if given must be a token type (< 256). If not given, this matches any *leaf* node; the content may still be required. The content, if given, must be a string. If a name is given, the matching node is stored in the results dict under that key. """ if type is not None: assert 0 <= type < 256, type if content is not None: assert isinstance(content, basestring), repr(content) self.type = type self.content = content self.name = name def match(self, node, results=None): """Override match() to insist on a leaf node.""" if not isinstance(node, Leaf): return False return BasePattern.match(self, node, results) def _submatch(self, node, results=None): """ Match the pattern's content to the node's children. This assumes the node type matches and self.content is not None. Returns True if it matches, False if not. If results is not None, it must be a dict which will be updated with the nodes matching named subpatterns. When returning False, the results dict may still be updated. """ return self.content == node.value class NodePattern(BasePattern): wildcards = False def __init__(self, type=None, content=None, name=None): """ Initializer. Takes optional type, content, and name. The type, if given, must be a symbol type (>= 256). If the type is None this matches *any* single node (leaf or not), except if content is not None, in which it only matches non-leaf nodes that also match the content pattern. The content, if not None, must be a sequence of Patterns that must match the node's children exactly. If the content is given, the type must not be None. If a name is given, the matching node is stored in the results dict under that key. 
""" if type is not None: assert type >= 256, type if content is not None: assert not isinstance(content, basestring), repr(content) content = list(content) for i, item in enumerate(content): assert isinstance(item, BasePattern), (i, item) if isinstance(item, WildcardPattern): self.wildcards = True self.type = type self.content = content self.name = name def _submatch(self, node, results=None): """ Match the pattern's content to the node's children. This assumes the node type matches and self.content is not None. Returns True if it matches, False if not. If results is not None, it must be a dict which will be updated with the nodes matching named subpatterns. When returning False, the results dict may still be updated. """ if self.wildcards: for c, r in generate_matches(self.content, node.children): if c == len(node.children): if results is not None: results.update(r) return True return False if len(self.content) != len(node.children): return False for subpattern, child in zip(self.content, node.children): if not subpattern.match(child, results): return False return True class WildcardPattern(BasePattern): """ A wildcard pattern can match zero or more nodes. This has all the flexibility needed to implement patterns like: .* .+ .? .{m,n} (a b c | d e | f) (...)* (...)+ (...)? (...){m,n} except it always uses non-greedy matching. """ def __init__(self, content=None, min=0, max=HUGE, name=None): """ Initializer. Args: content: optional sequence of subsequences of patterns; if absent, matches one node; if present, each subsequence is an alternative [*] min: optional minimum number of times to match, default 0 max: optional maximum number of times to match, default HUGE name: optional name assigned to this match [*] Thus, if content is [[a, b, c], [d, e], [f, g, h]] this is equivalent to (a b c | d e | f g h); if content is None, this is equivalent to '.' in regular expression terms. The min and max parameters work as follows: min=0, max=maxint: .* min=1, max=maxint: .+ min=0, max=1: .? min=1, max=1: . If content is not None, replace the dot with the parenthesized list of alternatives, e.g. 
            (a b c | d e | f g h)*
        """
        assert 0 <= min <= max <= HUGE, (min, max)
        if content is not None:
            content = tuple(map(tuple, content))  # Protect against alterations
            # Check sanity of alternatives
            assert len(content), repr(content)  # Can't have zero alternatives
            for alt in content:
                assert len(alt), repr(alt)  # Can't have empty alternatives
        self.content = content
        self.min = min
        self.max = max
        self.name = name

    def optimize(self):
        """Optimize certain stacked wildcard patterns."""
        subpattern = None
        if (self.content is not None and
            len(self.content) == 1 and len(self.content[0]) == 1):
            subpattern = self.content[0][0]
        if self.min == 1 and self.max == 1:
            if self.content is None:
                return NodePattern(name=self.name)
            if subpattern is not None and self.name == subpattern.name:
                return subpattern.optimize()
        if (self.min <= 1 and isinstance(subpattern, WildcardPattern) and
            subpattern.min <= 1 and self.name == subpattern.name):
            return WildcardPattern(subpattern.content,
                                   self.min*subpattern.min,
                                   self.max*subpattern.max,
                                   subpattern.name)
        return self

    def match(self, node, results=None):
        """Does this pattern exactly match a node?"""
        return self.match_seq([node], results)

    def match_seq(self, nodes, results=None):
        """Does this pattern exactly match a sequence of nodes?"""
        for c, r in self.generate_matches(nodes):
            if c == len(nodes):
                if results is not None:
                    results.update(r)
                    if self.name:
                        results[self.name] = list(nodes)
                return True
        return False

    def generate_matches(self, nodes):
        """
        Generator yielding matches for a sequence of nodes.

        Args:
            nodes: sequence of nodes

        Yields:
            (count, results) tuples where:
            count: the match comprises nodes[:count];
            results: dict containing named submatches.
        """
        if self.content is None:
            # Shortcut for special case (see __init__.__doc__)
            for count in xrange(self.min, 1 + min(len(nodes), self.max)):
                r = {}
                if self.name:
                    r[self.name] = nodes[:count]
                yield count, r
        elif self.name == "bare_name":
            yield self._bare_name_matches(nodes)
        else:
            # The reason for this is that hitting the recursion limit usually
            # results in some ugly messages about how RuntimeErrors are being
            # ignored. We don't do this on non-CPython implementations because
            # they don't have this problem.
            if hasattr(sys, "getrefcount"):
                save_stderr = sys.stderr
                sys.stderr = StringIO()
            try:
                for count, r in self._recursive_matches(nodes, 0):
                    if self.name:
                        r[self.name] = nodes[:count]
                    yield count, r
            except RuntimeError:
                # We fall back to the iterative pattern matching scheme if the
                # recursive scheme hits the recursion limit.
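                # (Editor's note on the fallback: _iterative_matches below
                # expands alternatives breadth-first with an explicit
                # worklist, so it needs no recursion at the cost of keeping
                # all partial matches in memory.)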
                for count, r in self._iterative_matches(nodes):
                    if self.name:
                        r[self.name] = nodes[:count]
                    yield count, r
            finally:
                if hasattr(sys, "getrefcount"):
                    sys.stderr = save_stderr

    def _iterative_matches(self, nodes):
        """Helper to iteratively yield the matches."""
        nodelen = len(nodes)
        if 0 >= self.min:
            yield 0, {}

        results = []
        # generate matches that use just one alt from self.content
        for alt in self.content:
            for c, r in generate_matches(alt, nodes):
                yield c, r
                results.append((c, r))

        # for each match, iterate down the nodes
        while results:
            new_results = []
            for c0, r0 in results:
                # stop if the entire set of nodes has been matched
                if c0 < nodelen and c0 <= self.max:
                    for alt in self.content:
                        for c1, r1 in generate_matches(alt, nodes[c0:]):
                            if c1 > 0:
                                r = {}
                                r.update(r0)
                                r.update(r1)
                                yield c0 + c1, r
                                new_results.append((c0 + c1, r))
            results = new_results

    def _bare_name_matches(self, nodes):
        """Special optimized matcher for bare_name."""
        count = 0
        r = {}
        done = False
        max = len(nodes)
        while not done and count < max:
            done = True
            for leaf in self.content:
                if leaf[0].match(nodes[count], r):
                    count += 1
                    done = False
                    break
        r[self.name] = nodes[:count]
        return count, r

    def _recursive_matches(self, nodes, count):
        """Helper to recursively yield the matches."""
        assert self.content is not None
        if count >= self.min:
            yield 0, {}
        if count < self.max:
            for alt in self.content:
                for c0, r0 in generate_matches(alt, nodes):
                    for c1, r1 in self._recursive_matches(nodes[c0:], count+1):
                        r = {}
                        r.update(r0)
                        r.update(r1)
                        yield c0 + c1, r


class NegatedPattern(BasePattern):

    def __init__(self, content=None):
        """
        Initializer.

        The argument is either a pattern or None.  If it is None, this
        only matches an empty sequence (effectively '$' in regex
        lingo).  If it is not None, this matches whenever the argument
        pattern doesn't have any matches.
        """
        if content is not None:
            assert isinstance(content, BasePattern), repr(content)
        self.content = content

    def match(self, node):
        # We never match a node in its entirety
        return False

    def match_seq(self, nodes):
        # We only match an empty sequence of nodes in its entirety
        return len(nodes) == 0

    def generate_matches(self, nodes):
        if self.content is None:
            # Return a match if there is an empty sequence
            if len(nodes) == 0:
                yield 0, {}
        else:
            # Return a match if the argument pattern has no matches
            for c, r in self.content.generate_matches(nodes):
                return
            yield 0, {}


def generate_matches(patterns, nodes):
    """
    Generator yielding matches for a sequence of patterns and nodes.

    Args:
        patterns: a sequence of patterns
        nodes: a sequence of nodes

    Yields:
        (count, results) tuples where:
        count: the entire sequence of patterns matches nodes[:count];
        results: dict containing named submatches.
    """
    if not patterns:
        yield 0, {}
    else:
        p, rest = patterns[0], patterns[1:]
        for c0, r0 in p.generate_matches(nodes):
            if not rest:
                yield c0, r0
            else:
                for c1, r1 in generate_matches(rest, nodes[c0:]):
                    r = {}
                    r.update(r0)
                    r.update(r1)
                    yield c0 + c1, r
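

# Editor's sketch (illustrative only): a minimal demonstration of the
# pattern classes above.  It relies solely on names defined in this
# module; the token type number 1 is assumed to equal token.NAME from
# the pgen2 token module, so the sketch stays self-contained.
if __name__ == "__main__":
    _NAME = 1  # assumed: token.NAME in lib2to3.pgen2.token
    _leaf = Leaf(_NAME, "print")
    _pat = LeafPattern(_NAME, "print", name="kw")
    _results = {}
    if _pat.match(_leaf, _results):
        print "matched %r" % (_results["kw"],)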
refactor.py000064400000066572147204472210006732 0ustar00# Copyright 2006 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Refactoring framework.

Used as a main program, this can refactor any number of files and/or
recursively descend down directories.  Imported as a module, this
provides infrastructure to write your own refactoring tool.
"""

from __future__ import with_statement

__author__ = "Guido van Rossum <guido@python.org>"

# Python imports
import os
import pkgutil
import sys
import logging
import operator
import collections
import StringIO
from itertools import chain

# Local imports
from .pgen2 import driver, tokenize, token
from .fixer_util import find_root
from . import pytree, pygram
from . import btm_utils as bu
from . import btm_matcher as bm


def get_all_fix_names(fixer_pkg, remove_prefix=True):
    """Return a sorted list of all available fix names in the given package."""
    pkg = __import__(fixer_pkg, [], [], ["*"])
    fix_names = []
    for finder, name, ispkg in pkgutil.iter_modules(pkg.__path__):
        if name.startswith("fix_"):
            if remove_prefix:
                name = name[4:]
            fix_names.append(name)
    return fix_names


class _EveryNode(Exception):
    pass


def _get_head_types(pat):
    """ Accepts a pytree Pattern Node and returns a set
        of the pattern types which will match first. """

    if isinstance(pat, (pytree.NodePattern, pytree.LeafPattern)):
        # NodePatterns must either have no type and no content
        #   or a type and content -- so they don't get any farther
        # Always return leaves
        if pat.type is None:
            raise _EveryNode
        return set([pat.type])

    if isinstance(pat, pytree.NegatedPattern):
        if pat.content:
            return _get_head_types(pat.content)
        raise _EveryNode # Negated Patterns don't have a type

    if isinstance(pat, pytree.WildcardPattern):
        # Recurse on each node in content
        r = set()
        for p in pat.content:
            for x in p:
                r.update(_get_head_types(x))
        return r

    raise Exception("Oh no! I don't understand pattern %s" % (pat))


def _get_headnode_dict(fixer_list):
    """ Accepts a list of fixers and returns a dictionary
        of head node type --> fixer list.  """
    head_nodes = collections.defaultdict(list)
    every = []
    for fixer in fixer_list:
        if fixer.pattern:
            try:
                heads = _get_head_types(fixer.pattern)
            except _EveryNode:
                every.append(fixer)
            else:
                for node_type in heads:
                    head_nodes[node_type].append(fixer)
        else:
            if fixer._accept_type is not None:
                head_nodes[fixer._accept_type].append(fixer)
            else:
                every.append(fixer)
    for node_type in chain(pygram.python_grammar.symbol2number.itervalues(),
                           pygram.python_grammar.tokens):
        head_nodes[node_type].extend(every)
    return dict(head_nodes)


def get_fixers_from_package(pkg_name):
    """
    Return the fully qualified names for fixers in the package pkg_name.
    """
    return [pkg_name + "." + fix_name
            for fix_name in get_all_fix_names(pkg_name, False)]

def _identity(obj):
    return obj

if sys.version_info < (3, 0):
    import codecs
    _open_with_encoding = codecs.open
    # codecs.open doesn't translate newlines sadly.
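    # Why the wrappers below exist (editor's note): on platforms where
    # os.linesep is "\r\n", text read through codecs.open keeps the "\r\n"
    # pairs that the parser does not expect; _from_system_newlines
    # normalizes them to "\n" and _to_system_newlines restores them when
    # writing the refactored source back out.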
def _from_system_newlines(input): return input.replace(u"\r\n", u"\n") def _to_system_newlines(input): if os.linesep != "\n": return input.replace(u"\n", os.linesep) else: return input else: _open_with_encoding = open _from_system_newlines = _identity _to_system_newlines = _identity def _detect_future_features(source): have_docstring = False gen = tokenize.generate_tokens(StringIO.StringIO(source).readline) def advance(): tok = gen.next() return tok[0], tok[1] ignore = frozenset((token.NEWLINE, tokenize.NL, token.COMMENT)) features = set() try: while True: tp, value = advance() if tp in ignore: continue elif tp == token.STRING: if have_docstring: break have_docstring = True elif tp == token.NAME and value == u"from": tp, value = advance() if tp != token.NAME or value != u"__future__": break tp, value = advance() if tp != token.NAME or value != u"import": break tp, value = advance() if tp == token.OP and value == u"(": tp, value = advance() while tp == token.NAME: features.add(value) tp, value = advance() if tp != token.OP or value != u",": break tp, value = advance() else: break except StopIteration: pass return frozenset(features) class FixerError(Exception): """A fixer could not be loaded.""" class RefactoringTool(object): _default_options = {"print_function" : False, "write_unchanged_files" : False} CLASS_PREFIX = "Fix" # The prefix for fixer classes FILE_PREFIX = "fix_" # The prefix for modules with a fixer within def __init__(self, fixer_names, options=None, explicit=None): """Initializer. Args: fixer_names: a list of fixers to import options: a dict with configuration. explicit: a list of fixers to run even if they are explicit. """ self.fixers = fixer_names self.explicit = explicit or [] self.options = self._default_options.copy() if options is not None: self.options.update(options) if self.options["print_function"]: self.grammar = pygram.python_grammar_no_print_statement else: self.grammar = pygram.python_grammar # When this is True, the refactor*() methods will call write_file() for # files processed even if they were not changed during refactoring. If # and only if the refactor method's write parameter was True. self.write_unchanged_files = self.options.get("write_unchanged_files") self.errors = [] self.logger = logging.getLogger("RefactoringTool") self.fixer_log = [] self.wrote = False self.driver = driver.Driver(self.grammar, convert=pytree.convert, logger=self.logger) self.pre_order, self.post_order = self.get_fixers() self.files = [] # List of files that were or should be modified self.BM = bm.BottomMatcher() self.bmi_pre_order = [] # Bottom Matcher incompatible fixers self.bmi_post_order = [] for fixer in chain(self.post_order, self.pre_order): if fixer.BM_compatible: self.BM.add_fixer(fixer) # remove fixers that will be handled by the bottom-up # matcher elif fixer in self.pre_order: self.bmi_pre_order.append(fixer) elif fixer in self.post_order: self.bmi_post_order.append(fixer) self.bmi_pre_order_heads = _get_headnode_dict(self.bmi_pre_order) self.bmi_post_order_heads = _get_headnode_dict(self.bmi_post_order) def get_fixers(self): """Inspects the options to load the requested patterns and handlers. Returns: (pre_order, post_order), where pre_order is the list of fixers that want a pre-order AST traversal, and post_order is the list that want post-order traversal. 
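
        For example (illustrative): given "myfixes.fix_apply" in
        self.fixers, the module is imported, the class name FixApply is
        derived from the file name, the class is instantiated, and the
        instance is sorted into one of the two lists according to its
        .order attribute ("pre" or "post").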
""" pre_order_fixers = [] post_order_fixers = [] for fix_mod_path in self.fixers: mod = __import__(fix_mod_path, {}, {}, ["*"]) fix_name = fix_mod_path.rsplit(".", 1)[-1] if fix_name.startswith(self.FILE_PREFIX): fix_name = fix_name[len(self.FILE_PREFIX):] parts = fix_name.split("_") class_name = self.CLASS_PREFIX + "".join([p.title() for p in parts]) try: fix_class = getattr(mod, class_name) except AttributeError: raise FixerError("Can't find %s.%s" % (fix_name, class_name)) fixer = fix_class(self.options, self.fixer_log) if fixer.explicit and self.explicit is not True and \ fix_mod_path not in self.explicit: self.log_message("Skipping optional fixer: %s", fix_name) continue self.log_debug("Adding transformation: %s", fix_name) if fixer.order == "pre": pre_order_fixers.append(fixer) elif fixer.order == "post": post_order_fixers.append(fixer) else: raise FixerError("Illegal fixer order: %r" % fixer.order) key_func = operator.attrgetter("run_order") pre_order_fixers.sort(key=key_func) post_order_fixers.sort(key=key_func) return (pre_order_fixers, post_order_fixers) def log_error(self, msg, *args, **kwds): """Called when an error occurs.""" raise def log_message(self, msg, *args): """Hook to log a message.""" if args: msg = msg % args self.logger.info(msg) def log_debug(self, msg, *args): if args: msg = msg % args self.logger.debug(msg) def print_output(self, old_text, new_text, filename, equal): """Called with the old version, new version, and filename of a refactored file.""" pass def refactor(self, items, write=False, doctests_only=False): """Refactor a list of files and directories.""" for dir_or_file in items: if os.path.isdir(dir_or_file): self.refactor_dir(dir_or_file, write, doctests_only) else: self.refactor_file(dir_or_file, write, doctests_only) def refactor_dir(self, dir_name, write=False, doctests_only=False): """Descends down a directory and refactor every Python file found. Python files are assumed to have a .py extension. Files and subdirectories starting with '.' are skipped. """ py_ext = os.extsep + "py" for dirpath, dirnames, filenames in os.walk(dir_name): self.log_debug("Descending into %s", dirpath) dirnames.sort() filenames.sort() for name in filenames: if (not name.startswith(".") and os.path.splitext(name)[1] == py_ext): fullname = os.path.join(dirpath, name) self.refactor_file(fullname, write, doctests_only) # Modify dirnames in-place to remove subdirs with leading dots dirnames[:] = [dn for dn in dirnames if not dn.startswith(".")] def _read_python_source(self, filename): """ Do our best to decode a Python source file correctly. """ try: f = open(filename, "rb") except IOError as err: self.log_error("Can't open %s: %s", filename, err) return None, None try: encoding = tokenize.detect_encoding(f.readline)[0] finally: f.close() with _open_with_encoding(filename, "r", encoding=encoding) as f: return _from_system_newlines(f.read()), encoding def refactor_file(self, filename, write=False, doctests_only=False): """Refactors a file.""" input, encoding = self._read_python_source(filename) if input is None: # Reading the file failed. 
return input += u"\n" # Silence certain parse errors if doctests_only: self.log_debug("Refactoring doctests in %s", filename) output = self.refactor_docstring(input, filename) if self.write_unchanged_files or output != input: self.processed_file(output, filename, input, write, encoding) else: self.log_debug("No doctest changes in %s", filename) else: tree = self.refactor_string(input, filename) if self.write_unchanged_files or (tree and tree.was_changed): # The [:-1] is to take off the \n we added earlier self.processed_file(unicode(tree)[:-1], filename, write=write, encoding=encoding) else: self.log_debug("No changes in %s", filename) def refactor_string(self, data, name): """Refactor a given input string. Args: data: a string holding the code to be refactored. name: a human-readable name for use in error/log messages. Returns: An AST corresponding to the refactored input stream; None if there were errors during the parse. """ features = _detect_future_features(data) if "print_function" in features: self.driver.grammar = pygram.python_grammar_no_print_statement try: tree = self.driver.parse_string(data) except Exception as err: self.log_error("Can't parse %s: %s: %s", name, err.__class__.__name__, err) return finally: self.driver.grammar = self.grammar tree.future_features = features self.log_debug("Refactoring %s", name) self.refactor_tree(tree, name) return tree def refactor_stdin(self, doctests_only=False): input = sys.stdin.read() if doctests_only: self.log_debug("Refactoring doctests in stdin") output = self.refactor_docstring(input, "") if self.write_unchanged_files or output != input: self.processed_file(output, "", input) else: self.log_debug("No doctest changes in stdin") else: tree = self.refactor_string(input, "") if self.write_unchanged_files or (tree and tree.was_changed): self.processed_file(unicode(tree), "", input) else: self.log_debug("No changes in stdin") def refactor_tree(self, tree, name): """Refactors a parse tree (modifying the tree in place). For compatible patterns the bottom matcher module is used. Otherwise the tree is traversed node-to-node for matches. Args: tree: a pytree.Node instance representing the root of the tree to be refactored. name: a human-readable name for this tree. Returns: True if the tree was modified, False otherwise. 
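
        Sketch of the flow (editor's note): fixers flagged BM_compatible
        were collected into self.BM at construction time; self.BM.run()
        maps each such fixer to its candidate nodes, which are re-checked
        with fixer.match() before fixer.transform() is applied.  The
        remaining fixers are applied by plain pre-/post-order traversal.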
""" for fixer in chain(self.pre_order, self.post_order): fixer.start_tree(tree, name) #use traditional matching for the incompatible fixers self.traverse_by(self.bmi_pre_order_heads, tree.pre_order()) self.traverse_by(self.bmi_post_order_heads, tree.post_order()) # obtain a set of candidate nodes match_set = self.BM.run(tree.leaves()) while any(match_set.values()): for fixer in self.BM.fixers: if fixer in match_set and match_set[fixer]: #sort by depth; apply fixers from bottom(of the AST) to top match_set[fixer].sort(key=pytree.Base.depth, reverse=True) if fixer.keep_line_order: #some fixers(eg fix_imports) must be applied #with the original file's line order match_set[fixer].sort(key=pytree.Base.get_lineno) for node in list(match_set[fixer]): if node in match_set[fixer]: match_set[fixer].remove(node) try: find_root(node) except ValueError: # this node has been cut off from a # previous transformation ; skip continue if node.fixers_applied and fixer in node.fixers_applied: # do not apply the same fixer again continue results = fixer.match(node) if results: new = fixer.transform(node, results) if new is not None: node.replace(new) #new.fixers_applied.append(fixer) for node in new.post_order(): # do not apply the fixer again to # this or any subnode if not node.fixers_applied: node.fixers_applied = [] node.fixers_applied.append(fixer) # update the original match set for # the added code new_matches = self.BM.run(new.leaves()) for fxr in new_matches: if not fxr in match_set: match_set[fxr]=[] match_set[fxr].extend(new_matches[fxr]) for fixer in chain(self.pre_order, self.post_order): fixer.finish_tree(tree, name) return tree.was_changed def traverse_by(self, fixers, traversal): """Traverse an AST, applying a set of fixers to each node. This is a helper method for refactor_tree(). Args: fixers: a list of fixer instances. traversal: a generator that yields AST nodes. Returns: None """ if not fixers: return for node in traversal: for fixer in fixers[node.type]: results = fixer.match(node) if results: new = fixer.transform(node, results) if new is not None: node.replace(new) node = new def processed_file(self, new_text, filename, old_text=None, write=False, encoding=None): """ Called when a file has been refactored and there may be changes. """ self.files.append(filename) if old_text is None: old_text = self._read_python_source(filename)[0] if old_text is None: return equal = old_text == new_text self.print_output(old_text, new_text, filename, equal) if equal: self.log_debug("No changes to %s", filename) if not self.write_unchanged_files: return if write: self.write_file(new_text, filename, old_text, encoding) else: self.log_debug("Not writing changes to %s", filename) def write_file(self, new_text, filename, old_text, encoding=None): """Writes a string to a file. It first shows a unified diff between the old text and the new text, and then rewrites the file; the latter is only done if the write option is set. """ try: f = _open_with_encoding(filename, "w", encoding=encoding) except os.error as err: self.log_error("Can't create %s: %s", filename, err) return try: f.write(_to_system_newlines(new_text)) except os.error as err: self.log_error("Can't write %s: %s", filename, err) finally: f.close() self.log_debug("Wrote changes to %s", filename) self.wrote = True PS1 = ">>> " PS2 = "... " def refactor_docstring(self, input, filename): """Refactors a docstring, looking for doctests. This returns a modified version of the input string. 
It looks for doctests, which start with a ">>>" prompt, and may be continued with "..." prompts, as long as the "..." is indented the same as the ">>>". (Unfortunately we can't use the doctest module's parser, since, like most parsers, it is not geared towards preserving the original source.) """ result = [] block = None block_lineno = None indent = None lineno = 0 for line in input.splitlines(True): lineno += 1 if line.lstrip().startswith(self.PS1): if block is not None: result.extend(self.refactor_doctest(block, block_lineno, indent, filename)) block_lineno = lineno block = [line] i = line.find(self.PS1) indent = line[:i] elif (indent is not None and (line.startswith(indent + self.PS2) or line == indent + self.PS2.rstrip() + u"\n")): block.append(line) else: if block is not None: result.extend(self.refactor_doctest(block, block_lineno, indent, filename)) block = None indent = None result.append(line) if block is not None: result.extend(self.refactor_doctest(block, block_lineno, indent, filename)) return u"".join(result) def refactor_doctest(self, block, lineno, indent, filename): """Refactors one doctest. A doctest is given as a block of lines, the first of which starts with ">>>" (possibly indented), while the remaining lines start with "..." (identically indented). """ try: tree = self.parse_block(block, lineno, indent) except Exception as err: if self.logger.isEnabledFor(logging.DEBUG): for line in block: self.log_debug("Source: %s", line.rstrip(u"\n")) self.log_error("Can't parse docstring in %s line %s: %s: %s", filename, lineno, err.__class__.__name__, err) return block if self.refactor_tree(tree, filename): new = unicode(tree).splitlines(True) # Undo the adjustment of the line numbers in wrap_toks() below. clipped, new = new[:lineno-1], new[lineno-1:] assert clipped == [u"\n"] * (lineno-1), clipped if not new[-1].endswith(u"\n"): new[-1] += u"\n" block = [indent + self.PS1 + new.pop(0)] if new: block += [indent + self.PS2 + line for line in new] return block def summarize(self): if self.wrote: were = "were" else: were = "need to be" if not self.files: self.log_message("No files %s modified.", were) else: self.log_message("Files that %s modified:", were) for file in self.files: self.log_message(file) if self.fixer_log: self.log_message("Warnings/messages while refactoring:") for message in self.fixer_log: self.log_message(message) if self.errors: if len(self.errors) == 1: self.log_message("There was 1 error:") else: self.log_message("There were %d errors:", len(self.errors)) for msg, args, kwds in self.errors: self.log_message(msg, *args, **kwds) def parse_block(self, block, lineno, indent): """Parses a block into a tree. This is necessary to get correct line number / offset information in the parser diagnostics and embedded into the parse tree. """ tree = self.driver.parse_tokens(self.wrap_toks(block, lineno, indent)) tree.future_features = frozenset() return tree def wrap_toks(self, block, lineno, indent): """Wraps a tokenize stream to systematically modify start/end.""" tokens = tokenize.generate_tokens(self.gen_lines(block, indent).next) for type, value, (line0, col0), (line1, col1), line_text in tokens: line0 += lineno - 1 line1 += lineno - 1 # Don't bother updating the columns; this is too complicated # since line_text would also have to be updated and it would # still break for tokens spanning lines. Let the user guess # that the column numbers for doctests are relative to the # end of the prompt string (PS1 or PS2). 
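            # (Editor's example: with lineno == 10, a token that tokenize
            # reports at line 1 is re-reported at line 10, so diagnostics
            # point at the doctest's real position in the file.)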
            yield type, value, (line0, col0), (line1, col1), line_text

    def gen_lines(self, block, indent):
        """Generates lines as expected by tokenize from a list of lines.

        This strips the first len(indent + self.PS1) characters off each line.
        """
        prefix1 = indent + self.PS1
        prefix2 = indent + self.PS2
        prefix = prefix1
        for line in block:
            if line.startswith(prefix):
                yield line[len(prefix):]
            elif line == prefix.rstrip() + u"\n":
                yield u"\n"
            else:
                raise AssertionError("line=%r, prefix=%r" % (line, prefix))
            prefix = prefix2
        while True:
            yield ""


class MultiprocessingUnsupported(Exception):
    pass


class MultiprocessRefactoringTool(RefactoringTool):

    def __init__(self, *args, **kwargs):
        super(MultiprocessRefactoringTool, self).__init__(*args, **kwargs)
        self.queue = None
        self.output_lock = None

    def refactor(self, items, write=False, doctests_only=False,
                 num_processes=1):
        if num_processes == 1:
            return super(MultiprocessRefactoringTool, self).refactor(
                items, write, doctests_only)
        try:
            import multiprocessing
        except ImportError:
            raise MultiprocessingUnsupported
        if self.queue is not None:
            raise RuntimeError("already doing multiple processes")
        self.queue = multiprocessing.JoinableQueue()
        self.output_lock = multiprocessing.Lock()
        processes = [multiprocessing.Process(target=self._child)
                     for i in xrange(num_processes)]
        try:
            for p in processes:
                p.start()
            super(MultiprocessRefactoringTool, self).refactor(items, write,
                                                              doctests_only)
        finally:
            self.queue.join()
            for i in xrange(num_processes):
                self.queue.put(None)
            for p in processes:
                if p.is_alive():
                    p.join()
            self.queue = None

    def _child(self):
        task = self.queue.get()
        while task is not None:
            args, kwargs = task
            try:
                super(MultiprocessRefactoringTool, self).refactor_file(
                    *args, **kwargs)
            finally:
                self.queue.task_done()
            task = self.queue.get()

    def refactor_file(self, *args, **kwargs):
        if self.queue is not None:
            self.queue.put((args, kwargs))
        else:
            return super(MultiprocessRefactoringTool, self).refactor_file(
                *args, **kwargs)
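

# Editor's sketch (illustrative only): a minimal driver for the classes
# above.  The fixer package name "lib2to3.fixes" is the conventional one
# but is an assumption here; any package whose modules are named
# fix_*.py works with get_fixers_from_package().
if __name__ == "__main__":
    _rt = RefactoringTool(get_fixers_from_package("lib2to3.fixes"))
    _tree = _rt.refactor_string(u"apply(f, args)\n", "<example>")
    if _tree is not None:
        print unicode(_tree),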
I don't understand pattern %sN(t isinstanceRt NodePatternt LeafPatternttypetNoneRtsettNegatedPatterntcontentt_get_head_typestWildcardPatterntupdatet Exception(tpattrtptx((s(/usr/lib64/python2.7/lib2to3/refactor.pyR%2s      cCstjt}g}x|D]}|jryt|j}Wntk r^|j|qXxU|D]}||j|qfWq|jdk r||jj|q|j|qWx:t t j j j t j jD]}||j|qWt|S(s^ Accepts a list of fixers and returns a dictionary of head node type --> fixer list. N(t collectionst defaultdicttlisttpatternR%RRt _accept_typeR!RRtpython_grammart symbol2numbert itervaluesttokenstextendtdict(t fixer_listt head_nodesteverytfixertheadst node_type((s(/usr/lib64/python2.7/lib2to3/refactor.pyt_get_headnode_dictNs"    cCs(gt|tD]}|d|^qS(sN Return the fully qualified names for fixers in the package pkg_name. t.(RtFalse(tpkg_nametfix_name((s(/usr/lib64/python2.7/lib2to3/refactor.pytget_fixers_from_packagegscCs|S(N((tobj((s(/usr/lib64/python2.7/lib2to3/refactor.pyt _identitynsiicCs|jddS(Nu u (treplace(tinput((s(/usr/lib64/python2.7/lib2to3/refactor.pyt_from_system_newlinesuscCs*tjdkr"|jdtjS|SdS(Ns u (tostlinesepRF(RG((s(/usr/lib64/python2.7/lib2to3/refactor.pyt_to_system_newlineswscst}tjtj|jfd}ttjtjtj f}t }ykxdt r|\}}||krq]q]|tj kr|rPnt }q]|tj kr|dkr|\}}|tj ks|dkrPn|\}}|tj ks|dkrPn|\}}|tjkrY|dkrY|\}}nxa|tj kr|j||\}}|tjks|dkrPn|\}}q\Wq]Pq]WWntk rnXt|S(Ncsj}|d|dfS(Nii(tnext(ttok(tgen(s(/usr/lib64/python2.7/lib2to3/refactor.pytadvances ufromu __future__uimportu(u,(R@Rtgenerate_tokenstStringIOtreadlinet frozensetRtNEWLINEtNLtCOMMENTR"tTruetSTRINGtNAMEtOPtaddt StopIteration(tsourcethave_docstringROtignoretfeaturesttptvalue((RNs(/usr/lib64/python2.7/lib2to3/refactor.pyt_detect_future_featuressD       t FixerErrorcBseZdZRS(sA fixer could not be loaded.(RRt__doc__(((s(/usr/lib64/python2.7/lib2to3/refactor.pyRdstRefactoringToolcBs!eZied6ed6ZdZdZdddZdZdZ dZ dZ d Z eed Z eed Zd Zeed ZdZedZdZdZdeddZddZdZdZdZdZdZdZdZdZRS(tprint_functiontwrite_unchanged_filestFixR cCs||_|pg|_|jj|_|dk rI|jj|n|jdretj|_ n tj |_ |jj d|_ g|_ tjd|_g|_t|_tj|j dtjd|j|_|j\|_|_g|_tj|_g|_g|_ x}t!|j|jD]f}|j"rT|jj#|q2||jkrv|jj$|q2||jkr2|j j$|q2q2Wt%|j|_&t%|j |_'dS(sInitializer. Args: fixer_names: a list of fixers to import options: a dict with configuration. explicit: a list of fixers to run even if they are explicit. RgRhRftconverttloggerN((tfixerstexplicitt_default_optionstcopytoptionsR!R'Rt!python_grammar_no_print_statementtgrammarR2tgetRhterrorstloggingt getLoggerRkt fixer_logR@twroteRtDriverRRjt get_fixerst pre_ordert post_ordertfilestbmt BottomMatchertBMt bmi_pre_ordertbmi_post_orderRt BM_compatiblet add_fixerRR>tbmi_pre_order_headstbmi_post_order_heads(tselft fixer_namesRpRmR;((s(/usr/lib64/python2.7/lib2to3/refactor.pyt__init__s<            c Csg}g}x|jD]}t|iidg}|jddd}|j|jrr|t|j}n|jd}|jdjg|D]}|j ^q}yt ||} Wn't k rt d||fnX| |j |j} | jr?|jtk r?||jkr?|jd|qn|jd || jd krn|j| q| jd kr|j| qt d | jqWtjd } |jd| |jd| ||fS(sInspects the options to load the requested patterns and handlers. Returns: (pre_order, post_order), where pre_order is the list of fixers that want a pre-order AST traversal, and post_order is the list that want post-order traversal. 
R R?iit_tsCan't find %s.%ssSkipping optional fixer: %ssAdding transformation: %stpretpostsIllegal fixer order: %rt run_ordertkey(RlR trsplitRt FILE_PREFIXtlentsplitt CLASS_PREFIXtjointtitletgetattrtAttributeErrorRdRpRwRmRWt log_messaget log_debugtorderRtoperatort attrgettertsort( Rtpre_order_fixerstpost_order_fixerst fix_mod_pathtmodRBtpartsR+t class_namet fix_classR;tkey_func((s(/usr/lib64/python2.7/lib2to3/refactor.pyRzs8/ cOsdS(sCalled when an error occurs.N((Rtmsgtargstkwds((s(/usr/lib64/python2.7/lib2to3/refactor.pyt log_errorscGs'|r||}n|jj|dS(sHook to log a message.N(Rktinfo(RRR((s(/usr/lib64/python2.7/lib2to3/refactor.pyRs cGs'|r||}n|jj|dS(N(Rktdebug(RRR((s(/usr/lib64/python2.7/lib2to3/refactor.pyRs cCsdS(sTCalled with the old version, new version, and filename of a refactored file.N((Rtold_texttnew_texttfilenametequal((s(/usr/lib64/python2.7/lib2to3/refactor.pyt print_output!scCsPxI|D]A}tjj|r5|j|||q|j|||qWdS(s)Refactor a list of files and directories.N(RItpathtisdirt refactor_dirt refactor_file(Rtitemstwritet doctests_onlyt dir_or_file((s(/usr/lib64/python2.7/lib2to3/refactor.pytrefactor&s c Cstjd}xtj|D]\}}}|jd||j|jxe|D]]}|jd rWtjj|d|krWtjj||} |j | ||qWqWWg|D]} | jds| ^q|(qWdS(sDescends down a directory and refactor every Python file found. Python files are assumed to have a .py extension. Files and subdirectories starting with '.' are skipped. tpysDescending into %sR?iN( RItextseptwalkRRRRtsplitextRR( Rtdir_nameRRtpy_exttdirpathtdirnamest filenamesRtfullnametdn((s(/usr/lib64/python2.7/lib2to3/refactor.pyR/s    cCsyt|d}Wn'tk r<}|jd||dSXztj|jd}Wd|jXt|dd|}t |j |fSWdQXdS(sG Do our best to decode a Python source file correctly. trbsCan't open %s: %siNR*tencoding(NN( topentIOErrorRR!Rtdetect_encodingRRtcloset_open_with_encodingRHtread(RRtfterrR((s(/usr/lib64/python2.7/lib2to3/refactor.pyt_read_python_sourceCs cCs|j|\}}|dkr%dS|d7}|r|jd||j||}|jsl||kr|j|||||q|jd|nc|j||}|js|r|jr|jt|d |d|d|n|jd|dS( sRefactors a file.Nu sRefactoring doctests in %ssNo doctest changes in %siRRsNo changes in %s( RR!Rtrefactor_docstringRhtprocessed_filetrefactor_stringt was_changedtunicode(RRRRRGRtoutputttree((s(/usr/lib64/python2.7/lib2to3/refactor.pyRSs  cCst|}d|kr*tj|j_nzMy|jj|}Wn0tk ru}|jd||jj |dSXWd|j|j_X||_ |j d||j |||S(sFRefactor a given input string. Args: data: a string holding the code to be refactored. name: a human-readable name for use in error/log messages. Returns: An AST corresponding to the refactored input stream; None if there were errors during the parse. RgsCan't parse %s: %s: %sNsRefactoring %s( RcRRqRRrt parse_stringR(Rt __class__Rtfuture_featuresRt refactor_tree(RtdataRR`RR((s(/usr/lib64/python2.7/lib2to3/refactor.pyRjs     cCstjj}|ro|jd|j|d}|jsI||kr_|j|d|q|jdnS|j|d}|js|r|jr|jt |d|n |jddS(NsRefactoring doctests in stdinssNo doctest changes in stdinsNo changes in stdin( tsyststdinRRRRhRRRR(RRRGRR((s(/usr/lib64/python2.7/lib2to3/refactor.pytrefactor_stdins c Csx-t|j|jD]}|j||qW|j|j|j|j|j|j|jj|j }xt |j rcx|jj D]}||kr||r||j dtjjdt|jr||j dtjjnx[t||D]F}|||kr9||j|nyt|Wntk r]qnX|jr|||jkr|qn|j|}|r|j||}|dk rU|j|x9|jD]+}|jsg|_n|jj|qW|jj|j }x?|D]4} | |kr6g|| >> s... c Csg}d}d}d}d}x+|jtD]}|d7}|jj|jr|dk r|j|j||||n|}|g}|j|j} || }q.|dk r|j||j s|||j j dkr|j |q.|dk r/|j|j||||nd}d}|j |q.W|dk rz|j|j||||ndj |S(sRefactors a docstring, looking for doctests. This returns a modified version of the input string. It looks for doctests, which start with a ">>>" prompt, and may be continued with "..." prompts, as long as the "..." is indented the same as the ">>>". 
(Unfortunately we can't use the doctest module's parser, since, like most parsers, it is not geared towards preserving the original source.) iiu uN( R!t splitlinesRWtlstripRtPS1R6trefactor_doctesttfindtPS2trstripRR( RRGRtresulttblockt block_linenotindenttlinenotlineti((s(/usr/lib64/python2.7/lib2to3/refactor.pyR(s:        c Cssy|j|||}Wnutk r}|jjtjrmx*|D]}|jd|jdqGWn|jd|||j j ||SX|j ||rot |j t}||d ||d} }| dg|dkst| |djds|dcd7>>" (possibly indented), while the remaining lines start with "..." (identically indented). s Source: %su s+Can't parse docstring in %s line %s: %s: %siii(t parse_blockR(Rkt isEnabledForRutDEBUGRRRRRRRRRWtAssertionErrortendswithRtpopR( RRRRRRRRRtclipped((s(/usr/lib64/python2.7/lib2to3/refactor.pyRSs&   #.cCs|jrd}nd}|js4|jd|n1|jd|x|jD]}|j|qNW|jr|jdx!|jD]}|j|qWn|jrt|jdkr|jdn|jdt|jx0|jD]"\}}}|j|||qWndS( Ntweres need to besNo files %s modified.sFiles that %s modified:s$Warnings/messages while refactoring:isThere was 1 error:sThere were %d errors:(RxR}RRwRtR(RRtfiletmessageRRR((s(/usr/lib64/python2.7/lib2to3/refactor.pyt summarizeps$      cCs1|jj|j|||}t|_|S(sParses a block into a tree. This is necessary to get correct line number / offset information in the parser diagnostics and embedded into the parse tree. (Rt parse_tokenst wrap_toksRSR(RRRRR((s(/usr/lib64/python2.7/lib2to3/refactor.pyR s! c cstj|j||j}xe|D]]\}}\}}\} } } ||d7}| |d7} ||||f| | f| fVq%WdS(s;Wraps a tokenize stream to systematically modify start/end.iN(RRPt gen_linesRL( RRRRR5R Rbtline0tcol0tline1tcol1t line_text((s(/usr/lib64/python2.7/lib2to3/refactor.pyRs (ccs||j}||j}|}xi|D]a}|j|rN|t|Vn4||jdkrldVntd||f|}q'WxtrdVqWdS(sGenerates lines as expected by tokenize from a list of lines. This strips the first len(indent + self.PS1) characters off each line. u sline=%r, prefix=%rRN(RRRRRR RW(RRRtprefix1tprefix2tprefixR((s(/usr/lib64/python2.7/lib2to3/refactor.pyRs     N(RRR@RnRRR!RRzRRRRRRRRRRRRRRRRRRRR RR(((s(/usr/lib64/python2.7/lib2to3/refactor.pyRfs:  4 (         O    +   tMultiprocessingUnsupportedcBseZRS((RR(((s(/usr/lib64/python2.7/lib2to3/refactor.pyRstMultiprocessRefactoringToolcBs5eZdZeeddZdZdZRS(cOs/tt|j||d|_d|_dS(N(tsuperR RR!tqueuet output_lock(RRtkwargs((s(/usr/lib64/python2.7/lib2to3/refactor.pyRs ic Csf|dkr(tt|j|||Syddl}Wntk rQtnX|jdk rptdn|j |_|j |_ gt |D]}|j d|j^q}z;x|D]}|jqWtt|j|||Wd|jjx$t |D]}|jjdqWx'|D]}|jr5|jq5q5Wd|_XdS(Niis already doing multiple processesttarget(R!R Rtmultiprocessingt ImportErrorRR"R!t RuntimeErrort JoinableQueuetLockR#txrangetProcesst_childtstartRtputtis_alive( RRRRt num_processesR&Rt processesR+((s(/usr/lib64/python2.7/lib2to3/refactor.pyRs2    +     cCso|jj}xY|dk rj|\}}ztt|j||Wd|jjX|jj}qWdS(N(R"RsR!R!R Rt task_done(RttaskRR$((s(/usr/lib64/python2.7/lib2to3/refactor.pyR-s cOsE|jdk r(|jj||fntt|j||SdS(N(R"R!R/R!R R(RRR$((s(/usr/lib64/python2.7/lib2to3/refactor.pyRs(RRRR@RR-R(((s(/usr/lib64/python2.7/lib2to3/refactor.pyR s    (.Ret __future__Rt __author__RIR RRuRR-RQt itertoolsRtpgen2RRRt fixer_utilRRRRRtbuR R~RWRR(RR%R>RCREt version_infotcodecsRRRHRKRcRdtobjectRfRR (((s(/usr/lib64/python2.7/lib2to3/refactor.pyt sH                 (refactor.pyo000064400000056476147204472210007127 0ustar00 {fc@sdZddlmZdZddlZddlZddlZddlZddlZddl Z ddl Z ddl m Z ddl mZmZmZddlmZdd lmZmZdd lmZdd lmZed Zd efdYZdZdZdZ dZ!ej"ddfkrgddl#Z#e#j$Z%dZ&dZ'ne$Z%e!Z&e!Z'dZ(defdYZ)de*fdYZ+defdYZ,de+fdYZ-dS( sRefactoring framework. Used as a main program, this can refactor any number of files and/or recursively descend down directories. 
Imported as a module, this provides infrastructure to write your own refactoring tool. i(twith_statements#Guido van Rossum N(tchaini(tdriverttokenizettoken(t find_root(tpytreetpygram(t btm_utils(t btm_matchercCszt|ggdg}g}xUtj|jD]A\}}}|jdr1|rb|d}n|j|q1q1W|S(sEReturn a sorted list of all available fix names in the given package.t*tfix_i(t __import__tpkgutilt iter_modulest__path__t startswithtappend(t fixer_pkgt remove_prefixtpkgt fix_namestfindertnametispkg((s(/usr/lib64/python2.7/lib2to3/refactor.pytget_all_fix_names"s" t _EveryNodecBseZRS((t__name__t __module__(((s(/usr/lib64/python2.7/lib2to3/refactor.pyR.scCst|tjtjfrC|jdkr3tnt|jgSt|tjrt|j rkt |j Stnt|tj rt}x5|j D]*}x!|D]}|j t |qWqW|St d|dS(sf Accepts a pytree Pattern Node and returns a set of the pattern types which will match first. s$Oh no! I don't understand pattern %sN(t isinstanceRt NodePatternt LeafPatternttypetNoneRtsettNegatedPatterntcontentt_get_head_typestWildcardPatterntupdatet Exception(tpattrtptx((s(/usr/lib64/python2.7/lib2to3/refactor.pyR%2s      cCstjt}g}x|D]}|jryt|j}Wntk r^|j|qXxU|D]}||j|qfWq|jdk r||jj|q|j|qWx:t t j j j t j jD]}||j|qWt|S(s^ Accepts a list of fixers and returns a dictionary of head node type --> fixer list. N(t collectionst defaultdicttlisttpatternR%RRt _accept_typeR!RRtpython_grammart symbol2numbert itervaluesttokenstextendtdict(t fixer_listt head_nodesteverytfixertheadst node_type((s(/usr/lib64/python2.7/lib2to3/refactor.pyt_get_headnode_dictNs"    cCs(gt|tD]}|d|^qS(sN Return the fully qualified names for fixers in the package pkg_name. t.(RtFalse(tpkg_nametfix_name((s(/usr/lib64/python2.7/lib2to3/refactor.pytget_fixers_from_packagegscCs|S(N((tobj((s(/usr/lib64/python2.7/lib2to3/refactor.pyt _identitynsiicCs|jddS(Nu u (treplace(tinput((s(/usr/lib64/python2.7/lib2to3/refactor.pyt_from_system_newlinesuscCs*tjdkr"|jdtjS|SdS(Ns u (tostlinesepRF(RG((s(/usr/lib64/python2.7/lib2to3/refactor.pyt_to_system_newlineswscst}tjtj|jfd}ttjtjtj f}t }ykxdt r|\}}||krq]q]|tj kr|rPnt }q]|tj kr|dkr|\}}|tj ks|dkrPn|\}}|tj ks|dkrPn|\}}|tjkrY|dkrY|\}}nxa|tj kr|j||\}}|tjks|dkrPn|\}}q\Wq]Pq]WWntk rnXt|S(Ncsj}|d|dfS(Nii(tnext(ttok(tgen(s(/usr/lib64/python2.7/lib2to3/refactor.pytadvances ufromu __future__uimportu(u,(R@Rtgenerate_tokenstStringIOtreadlinet frozensetRtNEWLINEtNLtCOMMENTR"tTruetSTRINGtNAMEtOPtaddt StopIteration(tsourcethave_docstringROtignoretfeaturesttptvalue((RNs(/usr/lib64/python2.7/lib2to3/refactor.pyt_detect_future_featuressD       t FixerErrorcBseZdZRS(sA fixer could not be loaded.(RRt__doc__(((s(/usr/lib64/python2.7/lib2to3/refactor.pyRdstRefactoringToolcBs!eZied6ed6ZdZdZdddZdZdZ dZ dZ d Z eed Z eed Zd Zeed ZdZedZdZdZdeddZddZdZdZdZdZdZdZdZdZRS(tprint_functiontwrite_unchanged_filestFixR cCs||_|pg|_|jj|_|dk rI|jj|n|jdretj|_ n tj |_ |jj d|_ g|_ tjd|_g|_t|_tj|j dtjd|j|_|j\|_|_g|_tj|_g|_g|_ x}t!|j|jD]f}|j"rT|jj#|q2||jkrv|jj$|q2||jkr2|j j$|q2q2Wt%|j|_&t%|j |_'dS(sInitializer. Args: fixer_names: a list of fixers to import options: a dict with configuration. explicit: a list of fixers to run even if they are explicit. 
[binary bytecode: refactor.pyo continues through the middle of RefactoringTool. Recoverable docstrings: get_fixers ("Inspects the options to load the requested patterns and handlers. Returns: (pre_order, post_order), where pre_order is the list of fixers that want a pre-order AST traversal, and post_order is the list that want post-order traversal."; loading failures raise "Can't find %s.%s" and the method logs "Skipping optional fixer: %s" / "Adding transformation: %s" / "Illegal fixer order: %r"); log_error ("Called when an error occurs."); log_message ("Hook to log a message."); print_output ("Called with the old version, new version, and filename of a refactored file."); refactor ("Refactor a list of files and directories."); refactor_dir ("Descends down a directory and refactor every Python file found. Python files are assumed to have a .py extension. Files and subdirectories starting with '.' are skipped."); _read_python_source ("Do our best to decode a Python source file correctly."); and refactor_file ("Refactors a file.").]
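A hedged sketch of the file-level drivers whose docstrings survive above; the input paths are hypothetical placeholders:

    from lib2to3.refactor import RefactoringTool, get_fixers_from_package

    tool = RefactoringTool(get_fixers_from_package('lib2to3.fixes'))
    # refactor() accepts a mix of files and directories; write=True saves the
    # changes back to disk, doctests_only=True would limit the run to doctests.
    tool.refactor(['legacy_script.py', 'legacy_pkg'], write=True)
    tool.summarize()    # prints "Files that were modified:" and any errors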
RgsCan't parse %s: %s: %sNsRefactoring %s( RcRRqRRrt parse_stringR(Rt __class__Rtfuture_featuresRt refactor_tree(RtdataRR`RR((s(/usr/lib64/python2.7/lib2to3/refactor.pyRjs     cCstjj}|ro|jd|j|d}|jsI||kr_|j|d|q|jdnS|j|d}|js|r|jr|jt |d|n |jddS(NsRefactoring doctests in stdinssNo doctest changes in stdinsNo changes in stdin( tsyststdinRRRRhRRRR(RRRGRR((s(/usr/lib64/python2.7/lib2to3/refactor.pytrefactor_stdins c Csx-t|j|jD]}|j||qW|j|j|j|j|j|j|jj|j }xt |j rcx|jj D]}||kr||r||j dtjjdt|jr||j dtjjnx[t||D]F}|||kr9||j|nyt|Wntk r]qnX|jr|||jkr|qn|j|}|r|j||}|dk rU|j|x9|jD]+}|jsg|_n|jj|qW|jj|j }x?|D]4} | |kr6g|| >> s... c Csg}d}d}d}d}x+|jtD]}|d7}|jj|jr|dk r|j|j||||n|}|g}|j|j} || }q.|dk r|j||j s|||j j dkr|j |q.|dk r/|j|j||||nd}d}|j |q.W|dk rz|j|j||||ndj |S(sRefactors a docstring, looking for doctests. This returns a modified version of the input string. It looks for doctests, which start with a ">>>" prompt, and may be continued with "..." prompts, as long as the "..." is indented the same as the ">>>". (Unfortunately we can't use the doctest module's parser, since, like most parsers, it is not geared towards preserving the original source.) iiu uN( R!t splitlinesRWtlstripRtPS1R6trefactor_doctesttfindtPS2trstripRR( RRGRtresulttblockt block_linenotindenttlinenotlineti((s(/usr/lib64/python2.7/lib2to3/refactor.pyR(s:        c CsPy|j|||}Wnutk r}|jjtjrmx*|D]}|jd|jdqGWn|jd|||j j ||SX|j ||rLt |j t}||d ||d} }|djds|dcd7>>" (possibly indented), while the remaining lines start with "..." (identically indented). s Source: %su s+Can't parse docstring in %s line %s: %s: %siii(t parse_blockR(Rkt isEnabledForRutDEBUGRRRRRRRRRWtendswithRtpopR( RRRRRRRRRtclipped((s(/usr/lib64/python2.7/lib2to3/refactor.pyRSs$   .cCs|jrd}nd}|js4|jd|n1|jd|x|jD]}|j|qNW|jr|jdx!|jD]}|j|qWn|jrt|jdkr|jdn|jdt|jx0|jD]"\}}}|j|||qWndS( Ntweres need to besNo files %s modified.sFiles that %s modified:s$Warnings/messages while refactoring:isThere was 1 error:sThere were %d errors:(RxR}RRwRtR(RRtfiletmessageRRR((s(/usr/lib64/python2.7/lib2to3/refactor.pyt summarizeps$      cCs1|jj|j|||}t|_|S(sParses a block into a tree. This is necessary to get correct line number / offset information in the parser diagnostics and embedded into the parse tree. (Rt parse_tokenst wrap_toksRSR(RRRRR((s(/usr/lib64/python2.7/lib2to3/refactor.pyR s! c cstj|j||j}xe|D]]\}}\}}\} } } ||d7}| |d7} ||||f| | f| fVq%WdS(s;Wraps a tokenize stream to systematically modify start/end.iN(RRPt gen_linesRL( RRRRR5R Rbtline0tcol0tline1tcol1t line_text((s(/usr/lib64/python2.7/lib2to3/refactor.pyRs (ccs||j}||j}|}xi|D]a}|j|rN|t|Vn4||jdkrldVntd||f|}q'WxtrdVqWdS(sGenerates lines as expected by tokenize from a list of lines. This strips the first len(indent + self.PS1) characters off each line. 
[binary bytecode: tail of refactor.pyo. Recoverable content: the end of gen_lines (mismatched lines raise AssertionError("line=%r, prefix=%r")); class MultiprocessingUnsupported; and class MultiprocessRefactoringTool, whose refactor(items, write, doctests_only, num_processes) spawns _child() worker processes fed from a JoinableQueue, raises RuntimeError "already doing multiple processes" on re-entry, and falls back to the serial code path when num_processes is 1. End of the archive member and of the dump.]
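Both compiled members end with this multiprocessing variant. A hedged sketch of its use; the directory name and process count are illustrative:

    from lib2to3.refactor import (MultiprocessRefactoringTool,
                                  get_fixers_from_package)

    tool = MultiprocessRefactoringTool(get_fixers_from_package('lib2to3.fixes'))
    # With num_processes > 1 the work queue is shared by child processes;
    # MultiprocessingUnsupported is raised if the multiprocessing module is
    # unavailable on this interpreter.
    tool.refactor(['big_project'], write=True, num_processes=4)
    tool.summarize()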