diff --git a/test/jstests/array_access.py b/test/jstests/array_access.py index 697967b21..c809a7716 100644 --- a/test/jstests/array_access.py +++ b/test/jstests/array_access.py @@ -1,79 +1,79 @@ from __future__ import unicode_literals -from youtube_dl.jsinterp2.jsgrammar import Token +from youtube_dl.jsinterp2.jsgrammar import TokenTypes from youtube_dl.jsinterp2.tstream import _ASSIGN_OPERATORS tests = [ {'code': 'function f() { var x = [1,2,3]; x[0] = 4; x[0] = 5; x[2] = 7; return x; }', 'asserts': [{'value': [5, 2, 7], 'call': ('f',)}], 'ast': [ - (Token.FUNC, 'f', [], [ - (Token.VAR, + (TokenTypes.FUNC, 'f', [], [ + (TokenTypes.VAR, zip(['x'], - [(Token.ASSIGN, + [(TokenTypes.ASSIGN, None, - (Token.OPEXPR, [ - (Token.MEMBER, (Token.ARRAY, [ - (Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.INT, 1), None, None)]), None), - (Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.INT, 2), None, None)]), None), - (Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.INT, 3), None, None)]), None) + (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ARRAY, [ + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.INT, 1), None, None)]), None), + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.INT, 2), None, None)]), None), + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.INT, 3), None, None)]), None) ]), None, None), ]), None) ]) ), - (Token.EXPR, [ - (Token.ASSIGN, + (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, _ASSIGN_OPERATORS['='][1], - (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'x'), + (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'x'), None, - (Token.ELEM, - (Token.EXPR, [ - (Token.ASSIGN, + (TokenTypes.ELEM, + (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, None, - (Token.OPEXPR, [(Token.MEMBER, (Token.INT, 0), None, None)]), + (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.INT, 0), None, None)]), None) ]), None)) ]), - (Token.ASSIGN, None, (Token.OPEXPR, [(Token.MEMBER, (Token.INT, 4), None, None)]), None) + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.INT, 4), None, None)]), None) ) ]), - (Token.EXPR, [ - (Token.ASSIGN, + (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, _ASSIGN_OPERATORS['='][1], - (Token.OPEXPR, [(Token.MEMBER, (Token.ID, 'x'), - None, - (Token.ELEM, (Token.EXPR, [ - (Token.ASSIGN, + (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.ID, 'x'), None, - (Token.OPEXPR, [(Token.MEMBER, (Token.INT, 0), None, None)]), + (TokenTypes.ELEM, (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, + None, + (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.INT, 0), None, None)]), None) ]), None)) - ]), - (Token.ASSIGN, None, (Token.OPEXPR, [(Token.MEMBER, (Token.INT, 5), None, None)]), None)) + ]), + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.INT, 5), None, None)]), None)) ]), - (Token.EXPR, [ - (Token.ASSIGN, + (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, _ASSIGN_OPERATORS['='][1], - (Token.OPEXPR, [(Token.MEMBER, (Token.ID, 'x'), - None, - (Token.ELEM, (Token.EXPR, [ - (Token.ASSIGN, + (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.ID, 'x'), None, - (Token.OPEXPR, [(Token.MEMBER, (Token.INT, 2), None, None)]), + (TokenTypes.ELEM, (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, + None, + (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.INT, 2), None, None)]), None) ]), None)) - ]), - (Token.ASSIGN, None, (Token.OPEXPR, [(Token.MEMBER, (Token.INT, 7), None, None)]), None)) + ]), + 
(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.INT, 7), None, None)]), None)) ]), - (Token.RETURN, - (Token.EXPR, [ - (Token.ASSIGN, None, (Token.OPEXPR, [(Token.MEMBER, (Token.ID, 'x'), None, None)]), None) + (TokenTypes.RETURN, + (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.ID, 'x'), None, None)]), None) ]) ) ]) diff --git a/test/jstests/assignments.py b/test/jstests/assignments.py index ef9ccf8d0..f0f2b142c 100644 --- a/test/jstests/assignments.py +++ b/test/jstests/assignments.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -from youtube_dl.jsinterp2.jsgrammar import Token +from youtube_dl.jsinterp2.jsgrammar import TokenTypes from youtube_dl.jsinterp2.tstream import _OPERATORS, _ASSIGN_OPERATORS tests = [ @@ -8,30 +8,30 @@ tests = [ 'code': 'function f() { var x = 20; x = 30 + 1; return x; }', 'asserts': [{'value': 31, 'call': ('f',)}], 'ast': [ - (Token.FUNC, 'f', [], [ - (Token.VAR, zip( + (TokenTypes.FUNC, 'f', [], [ + (TokenTypes.VAR, zip( ['x'], - [(Token.ASSIGN, + [(TokenTypes.ASSIGN, None, - (Token.OPEXPR, [(Token.MEMBER, (Token.INT, 20), None, None)]), + (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.INT, 20), None, None)]), None)] )), - (Token.EXPR, [ - (Token.ASSIGN, + (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, _ASSIGN_OPERATORS['='][1], - (Token.OPEXPR, [(Token.MEMBER, (Token.ID, 'x'), None, None)]), - (Token.ASSIGN, None, - (Token.OPEXPR, [ - (Token.MEMBER, (Token.INT, 30), None, None), - (Token.MEMBER, (Token.INT, 1), None, None), - (Token.OP, _OPERATORS['+'][1])]), + (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.ID, 'x'), None, None)]), + (TokenTypes.ASSIGN, None, + (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.INT, 30), None, None), + (TokenTypes.MEMBER, (TokenTypes.INT, 1), None, None), + (TokenTypes.OP, _OPERATORS['+'][1])]), None)) ]), - (Token.RETURN, (Token.EXPR, [ - (Token.ASSIGN, None, - (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'x'), None, None) + (TokenTypes.RETURN, (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, None, + (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'x'), None, None) ]), None) ])) ]) diff --git a/test/jstests/basic.py b/test/jstests/basic.py index 36d1e9b43..52d24bac5 100644 --- a/test/jstests/basic.py +++ b/test/jstests/basic.py @@ -1,18 +1,18 @@ from __future__ import unicode_literals -from youtube_dl.jsinterp2.jsgrammar import Token +from youtube_dl.jsinterp2.jsgrammar import TokenTypes tests = [ { 'code': 'function f() { return 42; }', 'asserts': [{'value': 42, 'call': ('f',)}], 'ast': [ - (Token.FUNC, 'f', [], [ - (Token.RETURN, - (Token.EXPR, [ - (Token.ASSIGN, + (TokenTypes.FUNC, 'f', [], [ + (TokenTypes.RETURN, + (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, None, - (Token.OPEXPR, [(Token.MEMBER, (Token.INT, 42), None, None)]), + (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.INT, 42), None, None)]), None) ])) ]) @@ -21,7 +21,7 @@ tests = [ { 'code': 'function x() {;}', 'asserts': [{'value': None, 'call': ('x',)}], - 'ast': [(Token.FUNC, 'x', [], [None])] + 'ast': [(TokenTypes.FUNC, 'x', [], [None])] }, { # FIXME: function expression needs to be implemented diff --git a/test/jstests/branch.py b/test/jstests/branch.py index 535159f84..294f4b624 100644 --- a/test/jstests/branch.py +++ b/test/jstests/branch.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -from youtube_dl.jsinterp2.jsgrammar import Token +from youtube_dl.jsinterp2.jsgrammar import TokenTypes from youtube_dl.jsinterp2.tstream import 
_RELATIONS skip = { @@ -20,17 +20,17 @@ tests = [ ''', 'asserts': [{'value': True, 'call': ('a', 1)}, {'value': False, 'call': ('a', 0)}], 'ast': [ - (Token.FUNC, 'a', ['x'], [ - (Token.IF, - (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'x'), None, None), - (Token.MEMBER, (Token.INT, 0), None, None), - (Token.REL, _RELATIONS['>'][1]) + (TokenTypes.FUNC, 'a', ['x'], [ + (TokenTypes.IF, + (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'x'), None, None), + (TokenTypes.MEMBER, (TokenTypes.INT, 0), None, None), + (TokenTypes.REL, _RELATIONS['>'][1]) ]), None)]), - (Token.RETURN, (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.BOOL, True), None, None)]), None)])), - (Token.RETURN, (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.BOOL, False), None, None)]), None)]))) + (TokenTypes.RETURN, (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.BOOL, True), None, None)]), None)])), + (TokenTypes.RETURN, (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.BOOL, False), None, None)]), None)]))) ]) ] } diff --git a/test/jstests/calc.py b/test/jstests/calc.py index a32f10ae9..0b322de16 100644 --- a/test/jstests/calc.py +++ b/test/jstests/calc.py @@ -1,24 +1,24 @@ from __future__ import unicode_literals -from youtube_dl.jsinterp2.jsgrammar import Token +from youtube_dl.jsinterp2.jsgrammar import TokenTypes from youtube_dl.jsinterp2.tstream import _OPERATORS tests = [ {'code': 'function x4(a){return 2*a+1;}', 'asserts': [{'value': 7, 'call': ('x4', 3)}], 'ast': [ - (Token.FUNC, 'x4', ['a'], [ - (Token.RETURN, - (Token.EXPR, [ - (Token.ASSIGN, + (TokenTypes.FUNC, 'x4', ['a'], [ + (TokenTypes.RETURN, + (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, None, - (Token.OPEXPR, [ + (TokenTypes.OPEXPR, [ # Reverse Polish Notation! 
- (Token.MEMBER, (Token.INT, 2), None, None), - (Token.MEMBER, (Token.ID, 'a'), None, None), - (Token.OP, _OPERATORS['*'][1]), - (Token.MEMBER, (Token.INT, 1), None, None), - (Token.OP, _OPERATORS['+'][1]) + (TokenTypes.MEMBER, (TokenTypes.INT, 2), None, None), + (TokenTypes.MEMBER, (TokenTypes.ID, 'a'), None, None), + (TokenTypes.OP, _OPERATORS['*'][1]), + (TokenTypes.MEMBER, (TokenTypes.INT, 1), None, None), + (TokenTypes.OP, _OPERATORS['+'][1]) ]), None) ]) diff --git a/test/jstests/call.py b/test/jstests/call.py index 57f31b798..2d2ebfff1 100644 --- a/test/jstests/call.py +++ b/test/jstests/call.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -from youtube_dl.jsinterp2.jsgrammar import Token +from youtube_dl.jsinterp2.jsgrammar import TokenTypes from youtube_dl.jsinterp2.tstream import _OPERATORS tests = [ @@ -12,26 +12,26 @@ tests = [ ''', 'asserts': [{'value': 5, 'call': ('z',)}], 'ast': [ - (Token.FUNC, 'x', [], [ - (Token.RETURN, (Token.EXPR, [ - (Token.ASSIGN, None, (Token.OPEXPR, [(Token.MEMBER, (Token.INT, 2), None, None)]), None) + (TokenTypes.FUNC, 'x', [], [ + (TokenTypes.RETURN, (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.INT, 2), None, None)]), None) ])) ]), - (Token.FUNC, 'y', ['a'], [ - (Token.RETURN, (Token.EXPR, [ - (Token.ASSIGN, None, - (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'x'), None, (Token.CALL, [], None)), - (Token.MEMBER, (Token.ID, 'a'), None, None), - (Token.OP, _OPERATORS['+'][1]) + (TokenTypes.FUNC, 'y', ['a'], [ + (TokenTypes.RETURN, (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, None, + (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'x'), None, (TokenTypes.CALL, [], None)), + (TokenTypes.MEMBER, (TokenTypes.ID, 'a'), None, None), + (TokenTypes.OP, _OPERATORS['+'][1]) ]), None) ])) ]), - (Token.FUNC, 'z', [], [ - (Token.RETURN, (Token.EXPR, [ - (Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'y'), None, (Token.CALL, [ - (Token.ASSIGN, None, (Token.OPEXPR, [(Token.MEMBER, (Token.INT, 3), None, None)]), None) + (TokenTypes.FUNC, 'z', [], [ + (TokenTypes.RETURN, (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'y'), None, (TokenTypes.CALL, [ + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.INT, 3), None, None)]), None) ], None)) ]), None) ]) @@ -42,13 +42,13 @@ tests = [ 'code': 'function x(a) { return a.split(""); }', 'asserts': [{'value': ["a", "b", "c"], 'call': ('x', "abc")}], 'ast': [ - (Token.FUNC, 'x', ['a'], [ - (Token.RETURN, (Token.EXPR, [ - (Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'a'), None, - (Token.FIELD, 'split', - (Token.CALL, [ - (Token.ASSIGN, None, (Token.OPEXPR, [(Token.MEMBER, (Token.STR, ''), None, None)]), None) + (TokenTypes.FUNC, 'x', ['a'], [ + (TokenTypes.RETURN, (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'a'), None, + (TokenTypes.FIELD, 'split', + (TokenTypes.CALL, [ + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.STR, ''), None, None)]), None) ], None)) )]), None) @@ -64,32 +64,32 @@ tests = [ ''', 'asserts': [{'value': 0, 'call': ('c',)}], 'ast': [ - (Token.FUNC, 'a', ['x'], [ - (Token.RETURN, (Token.EXPR, [ - (Token.ASSIGN, None, (Token.OPEXPR, [(Token.MEMBER, (Token.ID, 'x'), None, None)]), None) + (TokenTypes.FUNC, 'a', ['x'], [ + (TokenTypes.RETURN, (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, 
[(TokenTypes.MEMBER, (TokenTypes.ID, 'x'), None, None)]), None) ])) ]), - (Token.FUNC, 'b', ['x'], [ - (Token.RETURN, (Token.EXPR, [ - (Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'x'), None, None), - (Token.MEMBER, (Token.INT, 1), None, None), - (Token.OP, _OPERATORS['+'][1]) + (TokenTypes.FUNC, 'b', ['x'], [ + (TokenTypes.RETURN, (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'x'), None, None), + (TokenTypes.MEMBER, (TokenTypes.INT, 1), None, None), + (TokenTypes.OP, _OPERATORS['+'][1]) ]), None) ])) ]), - (Token.FUNC, 'c', [], [ - (Token.RETURN, (Token.EXPR, [ - (Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.ARRAY, [ - (Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'a'), None, None)]), None), - (Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'b'), None, None)]), None) - ]), None, (Token.ELEM, (Token.EXPR, [ - (Token.ASSIGN, None, (Token.OPEXPR, [(Token.MEMBER, (Token.INT, 0), None, None)]), None) - ]), (Token.CALL, [ - (Token.ASSIGN, None, (Token.OPEXPR, [(Token.MEMBER, (Token.INT, 0), None, None)]), None) + (TokenTypes.FUNC, 'c', [], [ + (TokenTypes.RETURN, (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ARRAY, [ + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'a'), None, None)]), None), + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'b'), None, None)]), None) + ]), None, (TokenTypes.ELEM, (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.INT, 0), None, None)]), None) + ]), (TokenTypes.CALL, [ + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.INT, 0), None, None)]), None) ], None))) ]), None) ])) diff --git a/test/jstests/comments.py b/test/jstests/comments.py index 67fe709f1..7591e09bb 100644 --- a/test/jstests/comments.py +++ b/test/jstests/comments.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -from youtube_dl.jsinterp2.jsgrammar import Token +from youtube_dl.jsinterp2.jsgrammar import TokenTypes from youtube_dl.jsinterp2.tstream import _OPERATORS skip = {'jsinterp': 'Not yet fully implemented'} @@ -17,27 +17,27 @@ tests = [ ''', 'asserts': [{'value': 52, 'call': ('x',)}], 'ast': [ - (Token.FUNC, 'x', [], [ - (Token.VAR, zip( + (TokenTypes.FUNC, 'x', [], [ + (TokenTypes.VAR, zip( ['x'], - [(Token.ASSIGN, + [(TokenTypes.ASSIGN, None, - (Token.OPEXPR, [(Token.MEMBER, (Token.INT, 2), None, None)]), + (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.INT, 2), None, None)]), None)] )), - (Token.VAR, zip( + (TokenTypes.VAR, zip( ['y'], - [(Token.ASSIGN, + [(TokenTypes.ASSIGN, None, - (Token.OPEXPR, [(Token.MEMBER, (Token.INT, 50), None, None)]), + (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.INT, 50), None, None)]), None)] )), - (Token.RETURN, (Token.EXPR, [ - (Token.ASSIGN, None, - (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'x'), None, None), - (Token.MEMBER, (Token.ID, 'y'), None, None), - (Token.OP, _OPERATORS['+'][1]) + (TokenTypes.RETURN, (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, None, + (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'x'), None, None), + (TokenTypes.MEMBER, (TokenTypes.ID, 'y'), None, None), + (TokenTypes.OP, _OPERATORS['+'][1]) ]), None) ])) ]) @@ -52,28 +52,28 @@ tests = [ ''', 'asserts': [{'value': 3, 'call': ('f',)}], 'ast': [ - (Token.FUNC, 'f', [], [ - (Token.VAR, zip( + 
(TokenTypes.FUNC, 'f', [], [ + (TokenTypes.VAR, zip( ['x'], - [(Token.ASSIGN, + [(TokenTypes.ASSIGN, None, - (Token.OPEXPR, [(Token.MEMBER, (Token.STR, '/*'), None, None)]), + (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.STR, '/*'), None, None)]), None)] )), - (Token.VAR, zip( + (TokenTypes.VAR, zip( ['y'], - [(Token.ASSIGN, + [(TokenTypes.ASSIGN, None, - (Token.OPEXPR, [ - (Token.MEMBER, (Token.INT, 1), None, None), - (Token.MEMBER, (Token.INT, 2), None, None), - (Token.OP, _OPERATORS['+'][1]) + (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.INT, 1), None, None), + (TokenTypes.MEMBER, (TokenTypes.INT, 2), None, None), + (TokenTypes.OP, _OPERATORS['+'][1]) ]), None)] )), - (Token.RETURN, (Token.EXPR, [ - (Token.ASSIGN, None, - (Token.OPEXPR, [(Token.MEMBER, (Token.ID, 'y'), None, None)]), + (TokenTypes.RETURN, (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, None, + (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.ID, 'y'), None, None)]), None) ])) ]) diff --git a/test/jstests/debug.py b/test/jstests/debug.py index 9bdbdab7e..fe9f0add8 100644 --- a/test/jstests/debug.py +++ b/test/jstests/debug.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -from youtube_dl.jsinterp2.jsgrammar import Token +from youtube_dl.jsinterp2.jsgrammar import TokenTypes skip = { 'jsinterp': 'Debugger statement is not supported', diff --git a/test/jstests/do_loop.py b/test/jstests/do_loop.py index 98bdf144a..9368d179c 100644 --- a/test/jstests/do_loop.py +++ b/test/jstests/do_loop.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -from youtube_dl.jsinterp2.jsgrammar import Token +from youtube_dl.jsinterp2.jsgrammar import TokenTypes from youtube_dl.jsinterp2.tstream import _ASSIGN_OPERATORS, _UNARY_OPERATORS, _RELATIONS skip = { @@ -21,30 +21,30 @@ tests = [ ''', 'asserts': [{'value': 5, 'call': ('f', 5)}], 'ast': [ - (Token.FUNC, 'f', ['x'], [ - (Token.EXPR, [ - (Token.ASSIGN, _ASSIGN_OPERATORS['='][1], - (Token.OPEXPR, [(Token.MEMBER, (Token.ID, 'i'), None, None)]), - (Token.ASSIGN, None, (Token.OPEXPR, [(Token.MEMBER, (Token.INT, 1), None, None)]), None)) + (TokenTypes.FUNC, 'f', ['x'], [ + (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, _ASSIGN_OPERATORS['='][1], + (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.ID, 'i'), None, None)]), + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.INT, 1), None, None)]), None)) ]), - (Token.DO, - (Token.EXPR, [ - (Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'i'), None, None), - (Token.MEMBER, (Token.ID, 'x'), None, None), - (Token.REL, _RELATIONS['<'][1]) + (TokenTypes.DO, + (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'i'), None, None), + (TokenTypes.MEMBER, (TokenTypes.ID, 'x'), None, None), + (TokenTypes.REL, _RELATIONS['<'][1]) ]), None) ]), - (Token.BLOCK, [ - (Token.EXPR, [ - (Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'i'), None, None), - (Token.POSTFIX, _UNARY_OPERATORS['++'][1]) + (TokenTypes.BLOCK, [ + (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'i'), None, None), + (TokenTypes.POSTFIX, _UNARY_OPERATORS['++'][1]) ]), None) ]) ])), - (Token.RETURN, (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'i'), None, None)]), None)])) + (TokenTypes.RETURN, (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'i'), None, None)]), None)])) ]) ] } diff --git 
a/test/jstests/empty_return.py b/test/jstests/empty_return.py index 49d2c161f..29181f88b 100644 --- a/test/jstests/empty_return.py +++ b/test/jstests/empty_return.py @@ -1,21 +1,21 @@ from __future__ import unicode_literals -from youtube_dl.jsinterp2.jsgrammar import Token +from youtube_dl.jsinterp2.jsgrammar import TokenTypes tests = [ {'code': 'function f() { return; y(); }', 'asserts': [{'value': None, 'call': ('f',)}], 'ast': [ - (Token.FUNC, 'f', [], [ - (Token.RETURN, None), - (Token.EXPR, [ - (Token.ASSIGN, + (TokenTypes.FUNC, 'f', [], [ + (TokenTypes.RETURN, None), + (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, None, - (Token.OPEXPR, [ - (Token.MEMBER, - (Token.ID, 'y'), + (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, + (TokenTypes.ID, 'y'), None, - (Token.CALL, [], None) + (TokenTypes.CALL, [], None) ) ]), None) diff --git a/test/jstests/for_empty.py b/test/jstests/for_empty.py index 8085eb8e0..a50577de0 100644 --- a/test/jstests/for_empty.py +++ b/test/jstests/for_empty.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -from youtube_dl.jsinterp2.jsgrammar import Token +from youtube_dl.jsinterp2.jsgrammar import TokenTypes from youtube_dl.jsinterp2.tstream import _ASSIGN_OPERATORS, _UNARY_OPERATORS, _RELATIONS skip = { @@ -21,30 +21,30 @@ tests = [ ''', 'asserts': [{'value': 5, 'call': ('f', 5)}], 'ast': [ - (Token.FUNC, 'f', ['x'], [ - (Token.VAR, zip(['h'], [ - (Token.ASSIGN, None, (Token.OPEXPR, [(Token.MEMBER, (Token.INT, 0), None, None)]), None) + (TokenTypes.FUNC, 'f', ['x'], [ + (TokenTypes.VAR, zip(['h'], [ + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.INT, 0), None, None)]), None) ])), - (Token.FOR, + (TokenTypes.FOR, None, - (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'h'), None, None), - (Token.MEMBER, (Token.ID, 'x'), None, None), - (Token.REL, _RELATIONS['<='][1]) + (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'h'), None, None), + (TokenTypes.MEMBER, (TokenTypes.ID, 'x'), None, None), + (TokenTypes.REL, _RELATIONS['<='][1]) ]), None)]), - (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'h'), None, None), - (Token.PREFIX, _UNARY_OPERATORS['++'][1]) + (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'h'), None, None), + (TokenTypes.PREFIX, _UNARY_OPERATORS['++'][1]) ]), None)]), - (Token.BLOCK, [ - (Token.EXPR, [ - (Token.ASSIGN, _ASSIGN_OPERATORS['='][1], - (Token.OPEXPR, [(Token.MEMBER, (Token.ID, 'a'), None, None)]), - (Token.ASSIGN, None, (Token.OPEXPR, [(Token.MEMBER, (Token.ID, 'h'), None, None)]), None)) + (TokenTypes.BLOCK, [ + (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, _ASSIGN_OPERATORS['='][1], + (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.ID, 'a'), None, None)]), + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.ID, 'h'), None, None)]), None)) ]) ])), - (Token.RETURN, (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'a'), None, None)]), None)])) + (TokenTypes.RETURN, (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'a'), None, None)]), None)])) ]) ] } diff --git a/test/jstests/for_in.py b/test/jstests/for_in.py index b19424ae4..ebfcdd585 100644 --- a/test/jstests/for_in.py +++ b/test/jstests/for_in.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -from youtube_dl.jsinterp2.jsgrammar import Token +from 
youtube_dl.jsinterp2.jsgrammar import TokenTypes from youtube_dl.jsinterp2.tstream import _ASSIGN_OPERATORS skip = { @@ -20,24 +20,24 @@ tests = [ ''', 'asserts': [{'value': 'c', 'call': ('f', ['a', 'b', 'c'])}], 'ast': [ - (Token.FUNC, 'f', ['z'], [ - (Token.FOR, - (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'h'), None, None) + (TokenTypes.FUNC, 'f', ['z'], [ + (TokenTypes.FOR, + (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'h'), None, None) ]), None)]), - (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'z'), None, None) + (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'z'), None, None) ]), None)]), None, - (Token.BLOCK, [ - (Token.EXPR, [ - (Token.ASSIGN, _ASSIGN_OPERATORS['='][1], - (Token.OPEXPR, [(Token.MEMBER, (Token.ID, 'a'), None, None)]), - (Token.ASSIGN, None, (Token.OPEXPR, [(Token.MEMBER, (Token.ID, 'h'), None, None)]), None)) + (TokenTypes.BLOCK, [ + (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, _ASSIGN_OPERATORS['='][1], + (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.ID, 'a'), None, None)]), + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.ID, 'h'), None, None)]), None)) ]) ])), - (Token.RETURN, (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'a'), None, None)]), None)])) + (TokenTypes.RETURN, (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'a'), None, None)]), None)])) ]) ] } diff --git a/test/jstests/for_loop.py b/test/jstests/for_loop.py index 64f834593..0923202e4 100644 --- a/test/jstests/for_loop.py +++ b/test/jstests/for_loop.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -from youtube_dl.jsinterp2.jsgrammar import Token +from youtube_dl.jsinterp2.jsgrammar import TokenTypes from youtube_dl.jsinterp2.tstream import _ASSIGN_OPERATORS, _UNARY_OPERATORS, _RELATIONS skip = { @@ -20,29 +20,29 @@ tests = [ ''', 'asserts': [{'value': 5, 'call': ('f', 5)}], 'ast': [ - (Token.FUNC, 'f', ['x'], [ - (Token.FOR, - (Token.VAR, zip(['h'], [ - (Token.ASSIGN, None, (Token.OPEXPR, [(Token.MEMBER, (Token.INT, 0), None, None)]), None) + (TokenTypes.FUNC, 'f', ['x'], [ + (TokenTypes.FOR, + (TokenTypes.VAR, zip(['h'], [ + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.INT, 0), None, None)]), None) ])), - (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'h'), None, None), - (Token.MEMBER, (Token.ID, 'x'), None, None), - (Token.REL, _RELATIONS['<='][1]) + (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'h'), None, None), + (TokenTypes.MEMBER, (TokenTypes.ID, 'x'), None, None), + (TokenTypes.REL, _RELATIONS['<='][1]) ]), None)]), - (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'h'), None, None), - (Token.PREFIX, _UNARY_OPERATORS['++'][1]) + (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'h'), None, None), + (TokenTypes.PREFIX, _UNARY_OPERATORS['++'][1]) ]), None)]), - (Token.BLOCK, [ - (Token.EXPR, [ - (Token.ASSIGN, _ASSIGN_OPERATORS['='][1], - (Token.OPEXPR, [(Token.MEMBER, (Token.ID, 'a'), None, None)]), - (Token.ASSIGN, None, (Token.OPEXPR, [(Token.MEMBER, (Token.ID, 'h'), None, None)]), None)) + (TokenTypes.BLOCK, [ + (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, 
_ASSIGN_OPERATORS['='][1], + (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.ID, 'a'), None, None)]), + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.ID, 'h'), None, None)]), None)) ]) ])), - (Token.RETURN, (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'a'), None, None)]), None)])) + (TokenTypes.RETURN, (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'a'), None, None)]), None)])) ]) ] } diff --git a/test/jstests/func_expr.py b/test/jstests/func_expr.py index 4873500e0..ad12a4a56 100644 --- a/test/jstests/func_expr.py +++ b/test/jstests/func_expr.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -from youtube_dl.jsinterp2.jsgrammar import Token +from youtube_dl.jsinterp2.jsgrammar import TokenTypes from youtube_dl.jsinterp2.tstream import _ASSIGN_OPERATORS skip = { @@ -23,38 +23,38 @@ tests = [ ''', 'asserts': [{'value': 3, 'call': ('f',)}], 'ast': [ - (Token.FUNC, 'f', [], [ - (Token.VAR, zip(['add'], [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.FUNC, None, [], [ - (Token.VAR, zip( + (TokenTypes.FUNC, 'f', [], [ + (TokenTypes.VAR, zip(['add'], [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.FUNC, None, [], [ + (TokenTypes.VAR, zip( ['counter'], - [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.INT, 0), None, None) + [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.INT, 0), None, None) ]), None)] )), - (Token.RETURN, (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.FUNC, None, [], [ - (Token.RETURN, (Token.EXPR, [ - (Token.ASSIGN, _ASSIGN_OPERATORS['+='][1], (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'counter'), None, None) - ]), (Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.INT, 1), None, None) + (TokenTypes.RETURN, (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.FUNC, None, [], [ + (TokenTypes.RETURN, (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, _ASSIGN_OPERATORS['+='][1], (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'counter'), None, None) + ]), (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.INT, 1), None, None) ]), None)) ])) ]), None, None) ]), None)])) ]), None, None), - ]), None)]), None, (Token.CALL, [], None)) + ]), None)]), None, (TokenTypes.CALL, [], None)) ]), None)])), - (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'add'), None, (Token.CALL, [], None)) + (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'add'), None, (TokenTypes.CALL, [], None)) ]), None)]), - (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'add'), None, (Token.CALL, [], None)) + (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'add'), None, (TokenTypes.CALL, [], None)) ]), None)]), - (Token.RETURN, (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'add'), None, (Token.CALL, [], None)) + (TokenTypes.RETURN, (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'add'), None, (TokenTypes.CALL, [], None)) ]), None)])) ]) ] diff 
--git a/test/jstests/getfield.py b/test/jstests/getfield.py index c404a0371..2c8c5bcba 100644 --- a/test/jstests/getfield.py +++ b/test/jstests/getfield.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -from youtube_dl.jsinterp2.jsgrammar import Token +from youtube_dl.jsinterp2.jsgrammar import TokenTypes skip = {'jsinterp': 'Field access is not supported'} @@ -10,16 +10,16 @@ tests = [ 'asserts': [{'value': 3, 'call': ('f',)}], 'globals': {'a': {'var': 3}}, 'ast': [ - (Token.FUNC, 'f', [], [ - (Token.RETURN, - (Token.EXPR, [ - (Token.ASSIGN, + (TokenTypes.FUNC, 'f', [], [ + (TokenTypes.RETURN, + (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, None, - (Token.OPEXPR, [ - (Token.MEMBER, - (Token.ID, 'a'), + (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, + (TokenTypes.ID, 'a'), None, - (Token.FIELD, 'var', None)), + (TokenTypes.FIELD, 'var', None)), ]), None) ])) diff --git a/test/jstests/label.py b/test/jstests/label.py index ed33c4d13..06622d483 100644 --- a/test/jstests/label.py +++ b/test/jstests/label.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -from youtube_dl.jsinterp2.jsgrammar import Token +from youtube_dl.jsinterp2.jsgrammar import TokenTypes skip = { 'jsinterp': 'Label statement is not supported', diff --git a/test/jstests/morespace.py b/test/jstests/morespace.py index 83c5e6845..c5b96bb0a 100644 --- a/test/jstests/morespace.py +++ b/test/jstests/morespace.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -from youtube_dl.jsinterp2.jsgrammar import Token +from youtube_dl.jsinterp2.jsgrammar import TokenTypes from youtube_dl.jsinterp2.tstream import _ASSIGN_OPERATORS tests = [ @@ -8,22 +8,22 @@ tests = [ 'code': 'function f() { x = 2 ; return x; }', 'asserts': [{'value': 2, 'call': ('f',)}], 'ast': [ - (Token.FUNC, 'f', [], [ - (Token.EXPR, - [(Token.ASSIGN, + (TokenTypes.FUNC, 'f', [], [ + (TokenTypes.EXPR, + [(TokenTypes.ASSIGN, _ASSIGN_OPERATORS['='][1], - (Token.OPEXPR, [(Token.MEMBER, (Token.ID, 'x'), None, None)]), - (Token.ASSIGN, + (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.ID, 'x'), None, None)]), + (TokenTypes.ASSIGN, None, - (Token.OPEXPR, [(Token.MEMBER, (Token.INT, 2), None, None)]), + (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.INT, 2), None, None)]), None) )] ), - (Token.RETURN, - (Token.EXPR, [ - (Token.ASSIGN, + (TokenTypes.RETURN, + (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, None, - (Token.OPEXPR, [(Token.MEMBER, (Token.ID, 'x'), None, None)]), + (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.ID, 'x'), None, None)]), None) ]) ) diff --git a/test/jstests/object_literal.py b/test/jstests/object_literal.py index 95296f3aa..ba97c5420 100644 --- a/test/jstests/object_literal.py +++ b/test/jstests/object_literal.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -from youtube_dl.jsinterp2.jsgrammar import Token +from youtube_dl.jsinterp2.jsgrammar import TokenTypes from youtube_dl.jsinterp2.tstream import _ASSIGN_OPERATORS, _OPERATORS skip = { @@ -21,32 +21,32 @@ tests = [ } ''', 'ast': [ - (Token.FUNC, 'f', [], [ - (Token.VAR, + (TokenTypes.FUNC, 'f', [], [ + (TokenTypes.VAR, zip(['o'], - [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.OBJECT, [ - ('a', (Token.PROPVALUE, (Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.INT, 7), None, None) + [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.OBJECT, [ + ('a', (TokenTypes.PROPVALUE, (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.INT, 7), None, None) ]), None))), - ('b', 
(Token.PROPGET, [ - (Token.RETURN, (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.RSV, 'this'), None, (Token.FIELD, 'a', None)), - (Token.MEMBER, (Token.INT, 1), None, None), - (Token.OP, _OPERATORS['+'][1]) + ('b', (TokenTypes.PROPGET, [ + (TokenTypes.RETURN, (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.RSV, 'this'), None, (TokenTypes.FIELD, 'a', None)), + (TokenTypes.MEMBER, (TokenTypes.INT, 1), None, None), + (TokenTypes.OP, _OPERATORS['+'][1]) ]), None)])) ])), - ('c', (Token.PROPSET, 'x', [ - (Token.EXPR, [ - (Token.ASSIGN, + ('c', (TokenTypes.PROPSET, 'x', [ + (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, _ASSIGN_OPERATORS['='][1], - (Token.OPEXPR, [ - (Token.MEMBER, (Token.RSV, 'this'), None, (Token.FIELD, 'a', None)) + (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.RSV, 'this'), None, (TokenTypes.FIELD, 'a', None)) ]), - (Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'x'), None, None), - (Token.MEMBER, (Token.INT, 2), None, None), - (Token.OP, _OPERATORS['/'][1]) + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'x'), None, None), + (TokenTypes.MEMBER, (TokenTypes.INT, 2), None, None), + (TokenTypes.OP, _OPERATORS['/'][1]) ]), None)) ]) ])) @@ -55,8 +55,8 @@ tests = [ ]), None)] ) ), - (Token.RETURN, (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'o'), None, None)]), None)])) + (TokenTypes.RETURN, (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'o'), None, None)]), None)])) ]) ] } diff --git a/test/jstests/operators.py b/test/jstests/operators.py index 29e973389..548b8b87d 100644 --- a/test/jstests/operators.py +++ b/test/jstests/operators.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -from youtube_dl.jsinterp2.jsgrammar import Token +from youtube_dl.jsinterp2.jsgrammar import TokenTypes from youtube_dl.jsinterp2.tstream import _OPERATORS tests = [ @@ -8,13 +8,13 @@ tests = [ 'code': 'function f() { return 1 << 5; }', 'asserts': [{'value': 32, 'call': ('f',)}], 'ast': [ - (Token.FUNC, 'f', [], [ - (Token.RETURN, - (Token.EXPR, [ - (Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.INT, 1), None, None), - (Token.MEMBER, (Token.INT, 5), None, None), - (Token.OP, _OPERATORS['<<'][1]) + (TokenTypes.FUNC, 'f', [], [ + (TokenTypes.RETURN, + (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.INT, 1), None, None), + (TokenTypes.MEMBER, (TokenTypes.INT, 5), None, None), + (TokenTypes.OP, _OPERATORS['<<'][1]) ]), None) ])) ]) @@ -23,13 +23,13 @@ tests = [ 'code': 'function f() { return 19 & 21;}', 'asserts': [{'value': 17, 'call': ('f',)}], 'ast': [ - (Token.FUNC, 'f', [], [ - (Token.RETURN, - (Token.EXPR, [ - (Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.INT, 19), None, None), - (Token.MEMBER, (Token.INT, 21), None, None), - (Token.OP, _OPERATORS['&'][1]) + (TokenTypes.FUNC, 'f', [], [ + (TokenTypes.RETURN, + (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.INT, 19), None, None), + (TokenTypes.MEMBER, (TokenTypes.INT, 21), None, None), + (TokenTypes.OP, _OPERATORS['&'][1]) ]), None) ])) ]) @@ -38,13 +38,13 @@ tests = [ 'code': 'function f() { return 11 >> 2;}', 'asserts': [{'value': 2, 'call': ('f',)}], 'ast': [ - (Token.FUNC, 'f', [], [ - (Token.RETURN, - (Token.EXPR, [ - (Token.ASSIGN, None, (Token.OPEXPR, [ - 
(Token.MEMBER, (Token.INT, 11), None, None), - (Token.MEMBER, (Token.INT, 2), None, None), - (Token.OP, _OPERATORS['>>'][1]) + (TokenTypes.FUNC, 'f', [], [ + (TokenTypes.RETURN, + (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.INT, 11), None, None), + (TokenTypes.MEMBER, (TokenTypes.INT, 2), None, None), + (TokenTypes.OP, _OPERATORS['>>'][1]) ]), None) ])) ]) diff --git a/test/jstests/parens.py b/test/jstests/parens.py index 37d717383..d08a0401a 100644 --- a/test/jstests/parens.py +++ b/test/jstests/parens.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -from youtube_dl.jsinterp2.jsgrammar import Token +from youtube_dl.jsinterp2.jsgrammar import TokenTypes from youtube_dl.jsinterp2.tstream import _OPERATORS tests = [ @@ -8,20 +8,20 @@ tests = [ 'code': 'function f() { return (1 + 2) * 3; }', 'asserts': [{'value': 9, 'call': ('f',)}], 'ast': [ - (Token.FUNC, 'f', [], [ - (Token.RETURN, (Token.EXPR, [ - (Token.ASSIGN, None, - (Token.OPEXPR, [ - (Token.MEMBER, (Token.EXPR, [ - (Token.ASSIGN, None, - (Token.OPEXPR, [ - (Token.MEMBER, (Token.INT, 1), None, None), - (Token.MEMBER, (Token.INT, 2), None, None), - (Token.OP, _OPERATORS['+'][1]) + (TokenTypes.FUNC, 'f', [], [ + (TokenTypes.RETURN, (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, None, + (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, None, + (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.INT, 1), None, None), + (TokenTypes.MEMBER, (TokenTypes.INT, 2), None, None), + (TokenTypes.OP, _OPERATORS['+'][1]) ]), None) ]), None, None), - (Token.MEMBER, (Token.INT, 3), None, None), - (Token.OP, _OPERATORS['*'][1]) + (TokenTypes.MEMBER, (TokenTypes.INT, 3), None, None), + (TokenTypes.OP, _OPERATORS['*'][1]) ]), None) ])) ]) @@ -30,33 +30,33 @@ tests = [ 'code': 'function f() { return (1) + (2) * ((( (( (((((3)))))) )) ));}', 'asserts': [{'value': 7, 'call': ('f',)}], 'ast': [ - (Token.FUNC, 'f', [], [ - (Token.RETURN, (Token.EXPR, [ - (Token.ASSIGN, None, - (Token.OPEXPR, [ - (Token.MEMBER, (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.INT, 1), None, None) + (TokenTypes.FUNC, 'f', [], [ + (TokenTypes.RETURN, (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, None, + (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.INT, 1), None, None) ]), None)]), None, None), - (Token.MEMBER, (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.INT, 2), None, None) + (TokenTypes.MEMBER, (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.INT, 2), None, None) ]), None)]), None, None), - (Token.MEMBER, (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ - (Token.MEMBER, (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ - 
(Token.MEMBER, (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, - (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, - (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.INT, 3), None, None) + (TokenTypes.MEMBER, (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, + (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, + (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.INT, 3), None, None) ]), None)]), None, None) ]), None)]), None, None) ]), None)]), None, None) @@ -70,8 +70,8 @@ tests = [ ]), None)]), None, None) ]), None)]), None, None), - (Token.OP, _OPERATORS['*'][1]), - (Token.OP, _OPERATORS['+'][1]) + (TokenTypes.OP, _OPERATORS['*'][1]), + (TokenTypes.OP, _OPERATORS['+'][1]) ]), None) ])) ]) diff --git a/test/jstests/precedence.py b/test/jstests/precedence.py index 51845a646..72a4c90f9 100644 --- a/test/jstests/precedence.py +++ b/test/jstests/precedence.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -from youtube_dl.jsinterp2.jsgrammar import Token +from youtube_dl.jsinterp2.jsgrammar import TokenTypes from youtube_dl.jsinterp2.tstream import _ASSIGN_OPERATORS, _OPERATORS skip = {'interpret': 'Interpreting built-in fields are not yet implemented'} @@ -17,67 +17,67 @@ tests = [ ''', 'asserts': [{'value': [20, 20, 30, 40, 50], 'call': ('f',)}], 'ast': [ - (Token.FUNC, 'f', [], [ - (Token.VAR, + (TokenTypes.FUNC, 'f', [], [ + (TokenTypes.VAR, zip(['a'], - [(Token.ASSIGN, + [(TokenTypes.ASSIGN, None, - (Token.OPEXPR, [ - (Token.MEMBER, (Token.ARRAY, [ - (Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.INT, 10), None, None)]), None), - (Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.INT, 20), None, None)]), None), - (Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.INT, 30), None, None)]), None), - (Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.INT, 40), None, None)]), None), - (Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.INT, 50), None, None)]), None) + (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ARRAY, [ + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.INT, 10), None, None)]), None), + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.INT, 20), None, None)]), None), + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.INT, 30), None, None)]), None), + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.INT, 40), None, None)]), None), + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.INT, 50), None, None)]), None) ]), None, None), ]), None) ]) ), - (Token.VAR, + (TokenTypes.VAR, zip(['b'], - [(Token.ASSIGN, None, (Token.OPEXPR, [(Token.MEMBER, (Token.INT, 6), None, None)]), None)] + [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.INT, 6), None, None)]), None)] ) ), - (Token.EXPR, [ - (Token.ASSIGN, + (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, _ASSIGN_OPERATORS['='][1], - (Token.OPEXPR, [ - (Token.MEMBER, 
(Token.ID, 'a'), + (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'a'), None, - (Token.ELEM, - (Token.EXPR, [ - (Token.ASSIGN, + (TokenTypes.ELEM, + (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, None, - (Token.OPEXPR, [(Token.MEMBER, (Token.INT, 0), None, None)]), + (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.INT, 0), None, None)]), None) ]), None)) ]), - (Token.ASSIGN, + (TokenTypes.ASSIGN, None, - (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'a'), + (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'a'), None, - (Token.ELEM, (Token.EXPR, [ - (Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'b'), None, None), - (Token.MEMBER, (Token.ID, 'a'), None, (Token.FIELD, 'length', None)), - (Token.OP, _OPERATORS['%'][1]) + (TokenTypes.ELEM, (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'b'), None, None), + (TokenTypes.MEMBER, (TokenTypes.ID, 'a'), None, (TokenTypes.FIELD, 'length', None)), + (TokenTypes.OP, _OPERATORS['%'][1]) ]), None)]), None)) ]), None) ) ]), - (Token.RETURN, - (Token.EXPR, [ - (Token.ASSIGN, None, (Token.OPEXPR, [(Token.MEMBER, (Token.ID, 'a'), None, None)]), None) + (TokenTypes.RETURN, + (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.ID, 'a'), None, None)]), None) ]) ) ]) diff --git a/test/jstests/strange_chars.py b/test/jstests/strange_chars.py index c4a28c772..5fedd1d17 100644 --- a/test/jstests/strange_chars.py +++ b/test/jstests/strange_chars.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -from youtube_dl.jsinterp2.jsgrammar import Token +from youtube_dl.jsinterp2.jsgrammar import TokenTypes from youtube_dl.jsinterp2.tstream import _OPERATORS tests = [ @@ -8,24 +8,24 @@ tests = [ 'code': 'function $_xY1 ($_axY1) { var $_axY2 = $_axY1 + 1; return $_axY2; }', 'asserts': [{'value': 21, 'call': ('$_xY1', 20)}], 'ast': [ - (Token.FUNC, '$_xY1', ['$_axY1'], [ - (Token.VAR, + (TokenTypes.FUNC, '$_xY1', ['$_axY1'], [ + (TokenTypes.VAR, zip(['$_axY2'], - [(Token.ASSIGN, + [(TokenTypes.ASSIGN, None, - (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, '$_axY1'), None, None), - (Token.MEMBER, (Token.INT, 1), None, None), - (Token.OP, _OPERATORS['+'][1]) + (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, '$_axY1'), None, None), + (TokenTypes.MEMBER, (TokenTypes.INT, 1), None, None), + (TokenTypes.OP, _OPERATORS['+'][1]) ]), None) ]) ), - (Token.RETURN, - (Token.EXPR, [ - (Token.ASSIGN, + (TokenTypes.RETURN, + (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, None, - (Token.OPEXPR, [(Token.MEMBER, (Token.ID, '$_axY2'), None, None)]), + (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.ID, '$_axY2'), None, None)]), None)] ) ) diff --git a/test/jstests/switch.py b/test/jstests/switch.py index 29547ec05..236f88e73 100644 --- a/test/jstests/switch.py +++ b/test/jstests/switch.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -from youtube_dl.jsinterp2.jsgrammar import Token +from youtube_dl.jsinterp2.jsgrammar import TokenTypes from youtube_dl.jsinterp2.tstream import _ASSIGN_OPERATORS, _UNARY_OPERATORS skip = { @@ -31,47 +31,47 @@ tests = [ {'value': 6, 'call': ('a', 6)}, {'value': 8, 'call': ('a', 7)}], 'ast': [ - (Token.FUNC, 'a', ['x'], [ - (Token.SWITCH, (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'x'), None, None) + (TokenTypes.FUNC, 'a', ['x'], [ + (TokenTypes.SWITCH, (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, 
(TokenTypes.ID, 'x'), None, None) ]), None)]), [ - ((Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.INT, 6), None, None)]), None)]), + ((TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.INT, 6), None, None)]), None)]), [ - (Token.BREAK, None) + (TokenTypes.BREAK, None) ]), - ((Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.INT, 5), None, None)]), None)]), + ((TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.INT, 5), None, None)]), None)]), [ - (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'x'), None, None), - (Token.POSTFIX, _UNARY_OPERATORS['++'][1]) + (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'x'), None, None), + (TokenTypes.POSTFIX, _UNARY_OPERATORS['++'][1]) ]), None)]) ]), - ((Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.INT, 8), None, None)]), None)]), + ((TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.INT, 8), None, None)]), None)]), [ - (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'x'), None, None), - (Token.POSTFIX, _UNARY_OPERATORS['--'][1]) + (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'x'), None, None), + (TokenTypes.POSTFIX, _UNARY_OPERATORS['--'][1]) ]), None)]), - (Token.BREAK, None) + (TokenTypes.BREAK, None) ]), (None, [ - (Token.EXPR, [ - (Token.ASSIGN, + (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, _ASSIGN_OPERATORS['='][1], - (Token.OPEXPR, [(Token.MEMBER, (Token.ID, 'x'), None, None)]), - (Token.ASSIGN, None, (Token.OPEXPR, [(Token.MEMBER, (Token.INT, 0), None, None)]), None) + (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.ID, 'x'), None, None)]), + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.INT, 0), None, None)]), None) ) ]) ]) ] ), - (Token.RETURN, (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'x'), None, None)]), None)])) + (TokenTypes.RETURN, (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'x'), None, None)]), None)])) ]) ] } diff --git a/test/jstests/try_statement.py b/test/jstests/try_statement.py index 82f2a5d34..9e5ffe373 100644 --- a/test/jstests/try_statement.py +++ b/test/jstests/try_statement.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -from youtube_dl.jsinterp2.jsgrammar import Token +from youtube_dl.jsinterp2.jsgrammar import TokenTypes skip = { 'jsinterp': 'Try statement is not supported', diff --git a/test/jstests/while_loop.py b/test/jstests/while_loop.py index edb358451..c6b20f957 100644 --- a/test/jstests/while_loop.py +++ b/test/jstests/while_loop.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -from youtube_dl.jsinterp2.jsgrammar import Token +from youtube_dl.jsinterp2.jsgrammar import TokenTypes from youtube_dl.jsinterp2.tstream import _ASSIGN_OPERATORS, _UNARY_OPERATORS, _RELATIONS skip = { @@ -21,30 +21,30 @@ tests = [ ''', 'asserts': [{'value': 5, 'call': ('f', 5)}], 'ast': [ - (Token.FUNC, 'f', ['x'], [ - (Token.EXPR, [ - (Token.ASSIGN, _ASSIGN_OPERATORS['='][1], - (Token.OPEXPR, [(Token.MEMBER, (Token.ID, 'i'), None, None)]), - (Token.ASSIGN, None, (Token.OPEXPR, [(Token.MEMBER, (Token.INT, 1), None, None)]), None)) + (TokenTypes.FUNC, 'f', ['x'], [ + 
(TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, _ASSIGN_OPERATORS['='][1], + (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.ID, 'i'), None, None)]), + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [(TokenTypes.MEMBER, (TokenTypes.INT, 1), None, None)]), None)) ]), - (Token.WHILE, - (Token.EXPR, [ - (Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'i'), None, None), - (Token.MEMBER, (Token.ID, 'x'), None, None), - (Token.REL, _RELATIONS['<'][1]) + (TokenTypes.WHILE, + (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'i'), None, None), + (TokenTypes.MEMBER, (TokenTypes.ID, 'x'), None, None), + (TokenTypes.REL, _RELATIONS['<'][1]) ]), None) ]), - (Token.BLOCK, [ - (Token.EXPR, [ - (Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'i'), None, None), - (Token.POSTFIX, _UNARY_OPERATORS['++'][1]) + (TokenTypes.BLOCK, [ + (TokenTypes.EXPR, [ + (TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'i'), None, None), + (TokenTypes.POSTFIX, _UNARY_OPERATORS['++'][1]) ]), None) ]) ])), - (Token.RETURN, (Token.EXPR, [(Token.ASSIGN, None, (Token.OPEXPR, [ - (Token.MEMBER, (Token.ID, 'i'), None, None)]), None)])) + (TokenTypes.RETURN, (TokenTypes.EXPR, [(TokenTypes.ASSIGN, None, (TokenTypes.OPEXPR, [ + (TokenTypes.MEMBER, (TokenTypes.ID, 'i'), None, None)]), None)])) ]) ] } diff --git a/test/jstests/with_statement.py b/test/jstests/with_statement.py index 7369a3c90..efe86ae89 100644 --- a/test/jstests/with_statement.py +++ b/test/jstests/with_statement.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -from youtube_dl.jsinterp2.jsgrammar import Token +from youtube_dl.jsinterp2.jsgrammar import TokenTypes skip = { 'jsinterp': 'With statement is not supported', diff --git a/youtube_dl/jsinterp2/jsbuilt_ins/jsarray.py b/youtube_dl/jsinterp2/jsbuilt_ins/jsarray.py index d1a52cd86..717ef3b7f 100644 --- a/youtube_dl/jsinterp2/jsbuilt_ins/jsarray.py +++ b/youtube_dl/jsinterp2/jsbuilt_ins/jsarray.py @@ -1,6 +1,6 @@ from __future__ import unicode_literals -from youtube_dl.jsinterp2.jsbuilt_ins.internals import to_uint32, to_integer +from .internals import to_uint32, to_integer from .base import native_number, undefined from .jsobject import JSObject, JSObjectPrototype from .jsnumber import JSNumberPrototype diff --git a/youtube_dl/jsinterp2/jsgrammar.py b/youtube_dl/jsinterp2/jsgrammar.py index 4b2e228c0..993de53c0 100644 --- a/youtube_dl/jsinterp2/jsgrammar.py +++ b/youtube_dl/jsinterp2/jsgrammar.py @@ -20,7 +20,7 @@ _token_names = ('COPEN', 'CCLOSE', 'POPEN', 'PCLOSE', 'SOPEN', 'SCLOSE', 'PROPGET', 'PROPSET', 'PROPVALUE', 'RSV') -Token = namedtuple('Token', _token_names)._make(_token_names) +TokenTypes = namedtuple('Token', _token_names)._make(_token_names) __DECIMAL_RE = r'(?:[1-9][0-9]*)|0' __OCTAL_RE = r'0[0-7]+' @@ -61,20 +61,21 @@ _NULL_RE = r'null' _REGEX_FLAGS_RE = r'(?![gimy]*(?P<reflag>[gimy])[gimy]*(?P=reflag))(?P<%s>[gimy]{0,4}\b)' % 'REFLAGS' _REGEX_RE = r'/(?!\*)(?P<%s>(?:[^/\n]|(?:\\/))*)/(?:(?:%s)|(?:\s|$))' % ('REBODY', _REGEX_FLAGS_RE) -token_keys = Token.NULL, Token.BOOL, Token.ID, Token.STR, Token.INT, Token.FLOAT, Token.REGEX +token_keys = TokenTypes.NULL, TokenTypes.BOOL, TokenTypes.ID, TokenTypes.STR, TokenTypes.INT, TokenTypes.FLOAT, TokenTypes.REGEX _TOKENS = zip(token_keys, (_NULL_RE, _BOOL_RE, _NAME_RE, _STRING_RE, _INTEGER_RE, _FLOAT_RE, _REGEX_RE)) -COMMENT_RE = 
r'(?P<%s>/\*(?:(?!\*/)(?:\n|.))*\*/)' % Token.COMMENT +COMMENT_RE = r'(?P<%s>/\*(?:(?!\*/)(?:\n|.))*\*/)' % TokenTypes.COMMENT TOKENS_RE = r'|'.join('(?P<%(id)s>%(value)s)' % {'id': name, 'value': value} for name, value in _TOKENS) -LOGICAL_OPERATORS_RE = r'(?P<%s>%s)' % (Token.LOP, r'|'.join(re.escape(value) for value in _logical_operator)) -UNARY_OPERATORS_RE = r'(?P<%s>%s)' % (Token.UOP, r'|'.join(re.escape(value) for value in _unary_operator)) -ASSIGN_OPERATORS_RE = r'(?P<%s>%s)' % (Token.AOP, +LOGICAL_OPERATORS_RE = r'(?P<%s>%s)' % (TokenTypes.LOP, r'|'.join(re.escape(value) for value in _logical_operator)) +UNARY_OPERATORS_RE = r'(?P<%s>%s)' % (TokenTypes.UOP, r'|'.join(re.escape(value) for value in _unary_operator)) +ASSIGN_OPERATORS_RE = r'(?P<%s>%s)' % (TokenTypes.AOP, r'|'.join(re.escape(value) if value != '=' else re.escape(value) + r'(?!\=)' for value in _assign_operator)) -OPERATORS_RE = r'(?P<%s>%s)' % (Token.OP, r'|'.join(re.escape(value) for value in _operator)) -RELATIONS_RE = r'(?P<%s>%s)' % (Token.REL, r'|'.join(re.escape(value) for value in _relation)) -PUNCTUATIONS_RE = r'(?P<%s>%s)' % (Token.PUNCT, r'|'.join(re.escape(value) for value in _punctuations)) +OPERATORS_RE = r'(?P<%s>%s)' % (TokenTypes.OP, r'|'.join(re.escape(value) for value in _operator)) +RELATIONS_RE = r'(?P<%s>%s)' % (TokenTypes.REL, r'|'.join(re.escape(value) for value in _relation)) +PUNCTUATIONS_RE = r'(?P<%s>%s)' % (TokenTypes.PUNCT, r'|'.join(re.escape(value) for value in _punctuations)) +LINETERMINATORSEQ_RE = r'\n|\r(?!\n)|\u2028|\u2029' diff --git a/youtube_dl/jsinterp2/jsinterp.py b/youtube_dl/jsinterp2/jsinterp.py index 455bb9340..0b5c3f63b 100644 --- a/youtube_dl/jsinterp2/jsinterp.py +++ b/youtube_dl/jsinterp2/jsinterp.py @@ -5,7 +5,7 @@ import re from ..compat import compat_str from ..utils import ExtractorError from .jsparser import Parser -from .jsgrammar import Token, token_keys +from .jsgrammar import TokenTypes, token_keys from .jsbuilt_ins import global_obj from .jsbuilt_ins.base import isprimitive from .jsbuilt_ins.internals import to_string @@ -101,7 +101,7 @@ class JSInterpreter(object): name = stmt[0] ref = None - if name == Token.FUNC: + if name == TokenTypes.FUNC: name, args, body = stmt[1:] if name is not None: if self._context_stack: @@ -110,23 +110,23 @@ class JSInterpreter(object): self.global_vars[name] = Reference(self.build_function(args, body), (self.this, name)) else: raise ExtractorError('Function expression is not yet implemented') - elif name is Token.BLOCK: + elif name is TokenTypes.BLOCK: block = stmt[1] for stmt in block: s = self.interpret_statement(stmt) if s is not None: ref = s.getvalue() - elif name is Token.VAR: + elif name is TokenTypes.VAR: for name, value in stmt[1]: value = (self.interpret_expression(value).getvalue() if value is not None else global_obj.get_prop('undefined')) self.this[name] = Reference(value, (self.this, name)) - elif name is Token.EXPR: + elif name is TokenTypes.EXPR: for expr in stmt[1]: ref = self.interpret_expression(expr) # if # continue, break - elif name is Token.RETURN: + elif name is TokenTypes.RETURN: ref = self.interpret_statement(stmt[1]) self._context.ended = True # with @@ -144,7 +144,7 @@ class JSInterpreter(object): return name = expr[0] - if name is Token.ASSIGN: + if name is TokenTypes.ASSIGN: op, left, right = expr[1:] if op is None: ref = self.interpret_expression(left) @@ -154,11 +154,11 @@ class JSInterpreter(object): except ExtractorError: lname = left[0] key = None - if lname is Token.OPEXPR and len(left[1]) == 1: + if lname is TokenTypes.OPEXPR and len(left[1]) == 1: lname = 
left[1][0][0] - if lname is Token.MEMBER: + if lname is TokenTypes.MEMBER: lid, args, tail = left[1][0][1:] - if lid[0] is Token.ID and args is None and tail is None: + if lid[0] is TokenTypes.ID and args is None and tail is None: key = lid[1] if key is not None: u = Reference(global_obj.get_prop('undefined'), (self.this, key)) @@ -171,10 +171,10 @@ class JSInterpreter(object): # XXX check specs what to return ref = leftref - elif name is Token.EXPR: + elif name is TokenTypes.EXPR: ref = self.interpret_statement(expr) - elif name is Token.OPEXPR: + elif name is TokenTypes.OPEXPR: stack = [] postfix = [] rpn = expr[1][:] @@ -182,18 +182,18 @@ class JSInterpreter(object): while rpn: token = rpn.pop(0) # XXX relation 'in' 'instanceof' - if token[0] in (Token.OP, Token.AOP, Token.LOP, Token.REL): + if token[0] in (TokenTypes.OP, TokenTypes.AOP, TokenTypes.LOP, TokenTypes.REL): right = stack.pop() left = stack.pop() stack.append(Reference(token[1](left.getvalue(), right.getvalue()))) # XXX add unary operator 'delete', 'void', 'instanceof' - elif token[0] is Token.UOP: + elif token[0] is TokenTypes.UOP: right = stack.pop() stack.append(Reference(token[1](right.getvalue()))) - elif token[0] is Token.PREFIX: + elif token[0] is TokenTypes.PREFIX: right = stack.pop() stack.append(Reference(right.putvalue(token[1](right.getvalue())))) - elif token[0] is Token.POSTFIX: + elif token[0] is TokenTypes.POSTFIX: postfix.append((stack[-1], token[1])) else: stack.append(self.interpret_expression(token)) @@ -205,7 +205,7 @@ class JSInterpreter(object): else: raise ExtractorError('Expression has too many values') - elif name is Token.MEMBER: + elif name is TokenTypes.MEMBER: # TODO interpret member target, args, tail = expr[1:] target = self.interpret_expression(target) @@ -215,13 +215,13 @@ class JSInterpreter(object): source = None while tail is not None: tail_name, tail_value, tail = tail - if tail_name is Token.FIELD: + if tail_name is TokenTypes.FIELD: source = to_js(target.getvalue()) target = source.get_prop(tail_value) - elif tail_name is Token.ELEM: + elif tail_name is TokenTypes.ELEM: prop = self.interpret_expression(tail_value).getvalue() target = to_js(target.getvalue()).get_prop(to_string(to_js(prop))) - elif tail_name is Token.CALL: + elif tail_name is TokenTypes.CALL: args = (self.interpret_expression(arg).getvalue() for arg in tail_value) if isprimitive(target): if source is None: @@ -239,7 +239,7 @@ class JSInterpreter(object): target = Reference(target.getvalue()) ref = target - elif name is Token.ID: + elif name is TokenTypes.ID: # XXX error handling (unknown id) id = expr[1] try: @@ -255,7 +255,7 @@ class JSInterpreter(object): elif name in token_keys: ref = Reference(expr[1]) - elif name is Token.ARRAY: + elif name is TokenTypes.ARRAY: array = [] for key, elem in enumerate(expr[1]): value = self.interpret_expression(elem).getvalue() diff --git a/youtube_dl/jsinterp2/jsparser.py b/youtube_dl/jsinterp2/jsparser.py index beaddcb09..d8b2346c2 100644 --- a/youtube_dl/jsinterp2/jsparser.py +++ b/youtube_dl/jsinterp2/jsparser.py @@ -1,18 +1,18 @@ from __future__ import unicode_literals -from ..utils import ExtractorError -from .jsgrammar import Token, token_keys +from .jsgrammar import TokenTypes, token_keys from .tstream import TokenStream, convert_to_unary +from ..utils import ExtractorError class Parser(object): - + def __init__(self, code, pos=0, stack_size=100): super(Parser, self).__init__() self.token_stream = TokenStream(code, pos) self.stack_top = stack_size self._no_in = True - + def 
parse(self): while not self.token_stream.ended: yield self._source_element(self.stack_top) @@ -21,68 +21,68 @@ class Parser(object): if stack_top < 0: raise ExtractorError('Recursion limit reached') - token_id, token_value, token_pos = self.token_stream.peek() - if token_id is Token.ID and token_value == 'function': + token = self.token_stream.peek() + if token.id is TokenTypes.ID and token.value == 'function': source_element = self._function(stack_top - 1) else: source_element = self._statement(stack_top - 1) return source_element - + def _statement(self, stack_top): if stack_top < 0: raise ExtractorError('Recursion limit reached') statement = None - token_id, token_value, token_pos = self.token_stream.peek() - if token_id is Token.END: + token = self.token_stream.peek() + if token.id is TokenTypes.END: # empty statement goes straight here self.token_stream.pop() return statement # block - elif token_id is Token.COPEN: + elif token.id is TokenTypes.COPEN: # XXX refactor will deprecate some _statement calls - open_pos = token_pos + open_pos = token.pos self.token_stream.pop() block = [] while True: - token_id, token_value, token_pos = self.token_stream.peek() - if token_id is Token.CCLOSE: + token = self.token_stream.peek() + if token.id is TokenTypes.CCLOSE: self.token_stream.pop() break - elif token_id is Token.END and self.token_stream.ended: + elif token.id is TokenTypes.END and self.token_stream.ended: raise ExtractorError('Unbalanced parentheses at %d' % open_pos) block.append(self._statement(stack_top - 1)) - statement = (Token.BLOCK, block) + statement = (TokenTypes.BLOCK, block) - elif token_id is Token.ID: - if token_value == 'var': + elif token.id is TokenTypes.ID: + if token.value == 'var': self.token_stream.pop() variables = [] init = [] has_another = True while has_another: - token_id, token_value, token_pos = self.token_stream.pop() - if token_id is not Token.ID: - raise ExtractorError('Missing variable name at %d' % token_pos) + token = self.token_stream.pop() + if token.id is not TokenTypes.ID: + raise ExtractorError('Missing variable name at %d' % token.pos) self.token_stream.chk_id(last=True) - variables.append(token_value) + variables.append(token.value) - peek_id, peek_value, peek_pos = self.token_stream.peek() - if peek_id is Token.AOP: + peek = self.token_stream.peek() + if peek.id is TokenTypes.AOP: self.token_stream.pop() init.append(self._assign_expression(stack_top - 1)) - peek_id, peek_value, peek_pos = self.token_stream.peek() + peek = self.token_stream.peek() else: init.append(None) - if peek_id is Token.END: + if peek.id is TokenTypes.END: if self._no_in: self.token_stream.pop() has_another = False - elif peek_id is Token.COMMA: + elif peek.id is TokenTypes.COMMA: # TODO for not NoIn pass else: @@ -90,95 +90,95 @@ class Parser(object): # - token_id is Token.CCLOSE # - check line terminator # - restricted token - raise ExtractorError('Unexpected sequence at %d' % peek_pos) - statement = (Token.VAR, zip(variables, init)) + raise ExtractorError('Unexpected sequence at %d' % peek.pos) + statement = (TokenTypes.VAR, zip(variables, init)) - elif token_value == 'if': + elif token.value == 'if': statement = self._if_statement(stack_top - 1) - elif token_value == 'for': + elif token.value == 'for': statement = self._for_loop(stack_top - 1) - elif token_value == 'do': + elif token.value == 'do': statement = self._do_loop(stack_top - 1) - elif token_value == 'while': + elif token.value == 'while': statement = self._while_loop(stack_top - 1) - elif token_value in 
('break', 'continue'): + elif token.value in ('break', 'continue'): self.token_stream.pop() - token = {'break': Token.BREAK, 'continue': Token.CONTINUE}[token_value] - peek_id, peek_value, peek_pos = self.token_stream.peek() + token = {'break': TokenTypes.BREAK, 'continue': TokenTypes.CONTINUE}[token.value] + peek = self.token_stream.peek() # XXX no line break here label_name = None - if peek_id is not Token.END: + if peek.id is not TokenTypes.END: self.token_stream.chk_id() - label_name = peek_value + label_name = peek.value self.token_stream.pop() statement = (token, label_name) - peek_id, peek_value, peek_pos = self.token_stream.peek() - if peek_id is Token.END: + peek = self.token_stream.peek() + if peek.id is TokenTypes.END: self.token_stream.pop() else: # FIXME automatic end insertion - raise ExtractorError('Unexpected sequence at %d' % peek_pos) + raise ExtractorError('Unexpected sequence at %d' % peek.pos) - elif token_value == 'return': + elif token.value == 'return': statement = self._return_statement(stack_top - 1) - peek_id, peek_value, peek_pos = self.token_stream.peek() - if peek_id is Token.END: + peek = self.token_stream.peek() + if peek.id is TokenTypes.END: self.token_stream.pop() else: # FIXME automatic end insertion - raise ExtractorError('Unexpected sequence at %d' % peek_pos) + raise ExtractorError('Unexpected sequence at %d' % peek.pos) - elif token_value == 'with': + elif token.value == 'with': statement = self._with_statement(stack_top - 1) - elif token_value == 'switch': + elif token.value == 'switch': statement = self._switch_statement(stack_top - 1) - elif token_value == 'throw': + elif token.value == 'throw': self.token_stream.pop() # XXX no line break here expr = self._expression(stack_top - 1) - statement = (Token.RETURN, expr) - peek_id, peek_value, peek_pos = self.token_stream.peek() - if peek_id is Token.END: + statement = (TokenTypes.RETURN, expr) + peek = self.token_stream.peek() + if peek.id is TokenTypes.END: self.token_stream.pop() else: # FIXME automatic end insertion - raise ExtractorError('Unexpected sequence at %d' % peek_pos) + raise ExtractorError('Unexpected sequence at %d' % peek.pos) - elif token_value == 'try': + elif token.value == 'try': statement = self._try_statement(stack_top - 1) - elif token_value == 'debugger': + elif token.value == 'debugger': self.token_stream.pop() - statement = (Token.DEBUG,) - peek_id, peek_value, peek_pos = self.token_stream.peek() - if peek_id is Token.END: + statement = (TokenTypes.DEBUG,) + peek = self.token_stream.peek() + if peek.id is TokenTypes.END: self.token_stream.pop() else: # FIXME automatic end insertion - raise ExtractorError('Unexpected sequence at %d' % peek_pos) + raise ExtractorError('Unexpected sequence at %d' % peek.pos) else: # label # XXX possible refactoring (this is the only branch not popping) - token_id, token_value, token_pos = self.token_stream.peek(2) - if token_id is Token.COLON: - token_id, label_name, token_pos = self.token_stream.pop(2) + token = self.token_stream.peek(2) + if token.id is TokenTypes.COLON: + token = self.token_stream.pop(2) self.token_stream.chk_id(last=True) - statement = (Token.LABEL, label_name, self._statement(stack_top - 1)) + statement = (TokenTypes.LABEL, token.value, self._statement(stack_top - 1)) # expr if statement is None: statement = self._expression(stack_top - 1) - peek_id, peek_value, peek_pos = self.token_stream.peek() - if peek_id is Token.END: + peek = self.token_stream.peek() + if peek.id is TokenTypes.END: self.token_stream.pop() else: # 
FIXME automatic end insertion - raise ExtractorError('Unexpected sequence at %d' % peek_pos) + raise ExtractorError('Unexpected sequence at %d' % peek.pos) return statement @@ -187,63 +187,63 @@ class Parser(object): raise ExtractorError('Recursion limit reached') self.token_stream.pop() - token_id, token_value, token_pos = self.token_stream.pop() - if token_id is not Token.POPEN: - raise ExtractorError('Missing condition at %d' % token_pos) + token = self.token_stream.pop() + if token.id is not TokenTypes.POPEN: + raise ExtractorError('Missing condition at %d' % token.pos) cond_expr = self._expression(stack_top - 1) self.token_stream.pop() # Token.PCLOSE true_stmt = self._statement(stack_top - 1) false_stmt = None - token_id, token_value, token_pos = self.token_stream.peek() - if token_id is Token.ID and token_value == 'else': + token = self.token_stream.peek() + if token.id is TokenTypes.ID and token.value == 'else': self.token_stream.pop() false_stmt = self._statement(stack_top - 1) - return (Token.IF, cond_expr, true_stmt, false_stmt) + return (TokenTypes.IF, cond_expr, true_stmt, false_stmt) def _for_loop(self, stack_top): if stack_top < 0: raise ExtractorError('Recursion limit reached') self.token_stream.pop() - token_id, token_value, token_pos = self.token_stream.pop() - if token_id is not Token.POPEN: - raise ExtractorError('''Expected '(' at %d''' % token_pos) + token = self.token_stream.pop() + if token.id is not TokenTypes.POPEN: + raise ExtractorError('''Expected '(' at %d''' % token.pos) # FIXME set infor True (checked by variable declaration and relation expression) self._no_in = False - token_id, token_value, token_pos = self.token_stream.peek() - if token_id is Token.END: + token = self.token_stream.peek() + if token.id is TokenTypes.END: init = None - elif token_id is Token.ID and token_value == 'var': + elif token.id is TokenTypes.ID and token.value == 'var': # XXX change it on refactoring variable declaration list init = self._statement(stack_top - 1) else: init = self._expression(stack_top - 1) self._no_in = True - token_id, token_value, token_pos = self.token_stream.pop() - if token_id is Token.ID and token_value == 'in': + token = self.token_stream.pop() + if token.id is TokenTypes.ID and token.value == 'in': cond = self._expression(stack_top - 1) # FIXME further processing of operator 'in' needed for interpretation incr = None # NOTE ES6 has 'of' operator - elif token_id is Token.END: - token_id, token_value, token_pos = self.token_stream.peek() - cond = None if token_id is Token.END else self._expression(stack_top - 1) + elif token.id is TokenTypes.END: + token = self.token_stream.peek() + cond = None if token.id is TokenTypes.END else self._expression(stack_top - 1) - token_id, token_value, token_pos = self.token_stream.pop() - if token_id is not Token.END: - raise ExtractorError('''Expected ';' at %d''' % token_pos) + token = self.token_stream.pop() + if token.id is not TokenTypes.END: + raise ExtractorError('''Expected ';' at %d''' % token.pos) - token_id, token_value, token_pos = self.token_stream.peek() - incr = None if token_id is Token.END else self._expression(stack_top - 1) + token = self.token_stream.peek() + incr = None if token.id is TokenTypes.END else self._expression(stack_top - 1) else: - raise ExtractorError('Invalid condition in for loop initialization at %d' % token_pos) - token_id, token_value, token_pos = self.token_stream.pop() - if token_id is not Token.PCLOSE: - raise ExtractorError('''Expected ')' at %d''' % token_pos) + raise 
ExtractorError('Invalid condition in for loop initialization at %d' % token.pos) + token = self.token_stream.pop() + if token.id is not TokenTypes.PCLOSE: + raise ExtractorError('''Expected ')' at %d''' % token.pos) body = self._statement(stack_top - 1) - return (Token.FOR, init, cond, incr, body) + return (TokenTypes.FOR, init, cond, incr, body) def _do_loop(self, stack_top): if stack_top < 0: @@ -251,149 +251,150 @@ class Parser(object): self.token_stream.pop() body = self._statement(stack_top - 1) - token_id, token_value, token_pos = self.token_stream.pop() - if token_id is not Token.ID and token_value != 'while': - raise ExtractorError('''Expected 'while' at %d''' % token_pos) - token_id, token_value, token_pos = self.token_stream.pop() - if token_id is not Token.POPEN: - raise ExtractorError('''Expected '(' at %d''' % token_pos) + token = self.token_stream.pop() + if token.id is not TokenTypes.ID and token.value != 'while': + raise ExtractorError('''Expected 'while' at %d''' % token.pos) + token = self.token_stream.pop() + if token.id is not TokenTypes.POPEN: + raise ExtractorError('''Expected '(' at %d''' % token.pos) expr = self._expression(stack_top - 1) - token_id, token_value, token_pos = self.token_stream.pop() - if token_id is not Token.PCLOSE: - raise ExtractorError('''Expected ')' at %d''' % token_pos) - peek_id, peek_value, peek_pos = self.token_stream.peek() - if peek_id is Token.END: + token = self.token_stream.pop() + if token.id is not TokenTypes.PCLOSE: + raise ExtractorError('''Expected ')' at %d''' % token.pos) + peek = self.token_stream.peek() + if peek.id is TokenTypes.END: self.token_stream.pop() else: # FIXME automatic end insertion - raise ExtractorError('''Expected ';' at %d''' % peek_pos) - return (Token.DO, expr, body) + raise ExtractorError('''Expected ';' at %d''' % peek.pos) + return (TokenTypes.DO, expr, body) def _while_loop(self, stack_top): if stack_top < 0: raise ExtractorError('Recursion limit reached') self.token_stream.pop() - token_id, token_value, token_pos = self.token_stream.pop() - if token_id is not Token.POPEN: - raise ExtractorError('''Expected '(' at %d''' % token_pos) + token = self.token_stream.pop() + if token.id is not TokenTypes.POPEN: + raise ExtractorError('''Expected '(' at %d''' % token.pos) expr = self._expression(stack_top - 1) - token_id, token_value, token_pos = self.token_stream.pop() - if token_id is not Token.PCLOSE: - raise ExtractorError('''Expected ')' at %d''' % token_pos) + token = self.token_stream.pop() + if token.id is not TokenTypes.PCLOSE: + raise ExtractorError('''Expected ')' at %d''' % token.pos) body = self._statement(stack_top) - return (Token.WHILE, expr, body) + return (TokenTypes.WHILE, expr, body) def _return_statement(self, stack_top): if stack_top < 0: raise ExtractorError('Recursion limit reached') self.token_stream.pop() - peek_id, peek_value, peek_pos = self.token_stream.peek() + peek = self.token_stream.peek() # XXX no line break here - expr = self._expression(stack_top - 1) if peek_id is not Token.END else None - return (Token.RETURN, expr) + expr = self._expression(stack_top - 1) if peek.id is not TokenTypes.END else None + return (TokenTypes.RETURN, expr) def _with_statement(self, stack_top): if stack_top < 0: raise ExtractorError('Recursion limit reached') self.token_stream.pop() - token_id, token_value, token_pos = self.token_stream.pop() - if token_id is not Token.POPEN: - raise ExtractorError('Missing expression at %d' % token_pos) + token = self.token_stream.pop() + if token.id is not 
TokenTypes.POPEN: + raise ExtractorError('Missing expression at %d' % token.pos) expr = self._expression(stack_top - 1) self.token_stream.pop() # Token.PCLOSE - return (Token.WITH, expr, self._statement(stack_top - 1)) + return (TokenTypes.WITH, expr, self._statement(stack_top - 1)) def _switch_statement(self, stack_top): if stack_top < 0: raise ExtractorError('Recursion limit reached') self.token_stream.pop() - token_id, token_value, token_pos = self.token_stream.pop() - if token_id is not Token.POPEN: - raise ExtractorError('Missing expression at %d' % token_pos) + token = self.token_stream.pop() + if token.id is not TokenTypes.POPEN: + raise ExtractorError('Missing expression at %d' % token.pos) discriminant = self._expression(stack_top - 1) self.token_stream.pop() # Token.PCLOSE - token_id, token_value, token_pos = self.token_stream.pop() - if token_id is not Token.COPEN: - raise ExtractorError('Missing case block at %d' % token_pos) - open_pos = token_pos + token = self.token_stream.pop() + if token.id is not TokenTypes.COPEN: + raise ExtractorError('Missing case block at %d' % token.pos) + open_pos = token.pos has_default = False block = [] while True: - token_id, token_value, token_pos = self.token_stream.peek() - if token_id is Token.CCLOSE: + token = self.token_stream.peek() + if token.id is TokenTypes.CCLOSE: break - elif token_id is Token.ID and token_value == 'case': + elif token.id is TokenTypes.ID and token.value == 'case': self.token_stream.pop() expr = self._expression(stack_top - 1) - elif token_id is Token.ID and token_value == 'default': + elif token.id is TokenTypes.ID and token.value == 'default': if has_default: raise ExtractorError('Multiple default clause') self.token_stream.pop() has_default = True expr = None - elif token_id is Token.END and self.token_stream.ended: + elif token.id is TokenTypes.END and self.token_stream.ended: raise ExtractorError('Unbalanced parentheses at %d' % open_pos) else: raise ExtractorError('Unexpected sequence at %d, default or case clause is expected' % - token_pos) + token.pos) - token_id, token_value, token_pos = self.token_stream.pop() - if token_id is not Token.COLON: - raise ExtractorError('''Unexpected sequence at %d, ':' is expected''' % token_pos) + token = self.token_stream.pop() + if token.id is not TokenTypes.COLON: + raise ExtractorError('''Unexpected sequence at %d, ':' is expected''' % token.pos) statement_list = [] while True: - token_id, token_value, token_pos = self.token_stream.peek() - if token_id == Token.CCLOSE or (token_id is Token.ID and (token_value in ('default', 'case'))): + token = self.token_stream.peek() + if token.id == TokenTypes.CCLOSE or ( + token.id is TokenTypes.ID and (token.value in ('default', 'case'))): break - elif token_id is Token.END and self.token_stream.ended: + elif token.id is TokenTypes.END and self.token_stream.ended: raise ExtractorError('Unbalanced parentheses at %d' % open_pos) statement_list.append(self._statement(stack_top - 1)) block.append((expr, statement_list)) self.token_stream.pop() - return (Token.SWITCH, discriminant, block) + return (TokenTypes.SWITCH, discriminant, block) def _try_statement(self, stack_top): if stack_top < 0: raise ExtractorError('Recursion limit reached') self.token_stream.pop() - token_id, token_value, token_pos = self.token_stream.peek() - if token_id is not Token.COPEN: - raise ExtractorError('Block is expected at %d' % token_pos) + token = self.token_stream.peek() + if token.id is not TokenTypes.COPEN: + raise ExtractorError('Block is expected at %d' 
% token.pos) try_block = self._statement(stack_top - 1) - token_id, token_value, token_pos = self.token_stream.pop() + token = self.token_stream.pop() catch_block = None - if token_id is Token.ID and token_value == 'catch': - token_id, token_value, token_pos = self.token_stream.peek() - if token_id is not Token.POPEN: - raise ExtractorError('Catch clause is missing an identifier at %d' % token_pos) + if token.id is TokenTypes.ID and token.value == 'catch': + token = self.token_stream.peek() + if token.id is not TokenTypes.POPEN: + raise ExtractorError('Catch clause is missing an identifier at %d' % token.pos) self.token_stream.pop() self.token_stream.chk_id() - token_id, error_name, token_pos = self.token_stream.pop() - token_id, token_value, token_pos = self.token_stream.pop() - if token_id is not Token.PCLOSE: - raise ExtractorError('Catch clause expects a single identifier at %d' % token_pos) - token_id, token_value, token_pos = self.token_stream.peek() - if token_id is not Token.COPEN: - raise ExtractorError('Block is expected at %d' % token_pos) - catch_block = (error_name, self._statement(stack_top - 1)) + error = self.token_stream.pop() + token = self.token_stream.pop() + if token.id is not TokenTypes.PCLOSE: + raise ExtractorError('Catch clause expects a single identifier at %d' % token.pos) + token = self.token_stream.peek() + if token.id is not TokenTypes.COPEN: + raise ExtractorError('Block is expected at %d' % token.pos) + catch_block = (error.value, self._statement(stack_top - 1)) finally_block = None - if token_id is Token.ID and token_value == 'finally': - token_id, token_value, token_pos = self.token_stream.peek() - if token_id is not Token.COPEN: - raise ExtractorError('Block is expected at %d' % token_pos) + if token.id is TokenTypes.ID and token.value == 'finally': + token = self.token_stream.peek() + if token.id is not TokenTypes.COPEN: + raise ExtractorError('Block is expected at %d' % token.pos) finally_block = self._statement(stack_top - 1) if catch_block is None and finally_block is None: - raise ExtractorError('Try statement is expecting catch or finally at %d' % token_pos) - return (Token.TRY, try_block, catch_block, finally_block) + raise ExtractorError('Try statement is expecting catch or finally at %d' % token.pos) + return (TokenTypes.TRY, try_block, catch_block, finally_block) def _expression(self, stack_top): if stack_top < 0: @@ -403,37 +404,37 @@ class Parser(object): has_another = True while has_another: expr_list.append(self._assign_expression(stack_top - 1)) - peek_id, peek_value, peek_pos = self.token_stream.peek() - if peek_id is Token.COMMA: + peek = self.token_stream.peek() + if peek.id is TokenTypes.COMMA: self.token_stream.pop() - elif peek_id is Token.ID and peek_value == 'yield': + elif peek.id is TokenTypes.ID and peek.value == 'yield': # TODO parse yield - raise ExtractorError('Yield statement is not yet supported at %d' % peek_pos) + raise ExtractorError('Yield statement is not yet supported at %d' % peek.pos) else: has_another = False - return (Token.EXPR, expr_list) + return (TokenTypes.EXPR, expr_list) def _assign_expression(self, stack_top): if stack_top < 0: raise ExtractorError('Recursion limit reached') left = self._conditional_expression(stack_top - 1) - peek_id, peek_value, peek_pos = self.token_stream.peek() - if peek_id is Token.AOP: + peek = self.token_stream.peek() + if peek.id is TokenTypes.AOP: self.token_stream.pop() - _, op = peek_value + _, op = peek.value right = self._assign_expression(stack_top - 1) else: op = None right 
= None - return (Token.ASSIGN, op, left, right) + return (TokenTypes.ASSIGN, op, left, right) def _member_expression(self, stack_top): if stack_top < 0: raise ExtractorError('Recursion limit reached') - peek_id, peek_value, peek_pos = self.token_stream.peek() - if peek_id is Token.ID and peek_value == 'new': + peek = self.token_stream.peek() + if peek.id is TokenTypes.ID and peek.value == 'new': self.token_stream.pop() target = self._member_expression(stack_top - 1) args = self._arguments(stack_top - 1) @@ -443,39 +444,39 @@ class Parser(object): target = self._primary_expression(stack_top - 1) args = None - return (Token.MEMBER, target, args, self._member_tail(stack_top - 1)) + return (TokenTypes.MEMBER, target, args, self._member_tail(stack_top - 1)) def _member_tail(self, stack_top): if stack_top < 0: raise ExtractorError('Recursion limit reached') - peek_id, peek_value, peek_pos = self.token_stream.peek() - if peek_id is Token.DOT: + peek = self.token_stream.peek() + if peek.id is TokenTypes.DOT: self.token_stream.pop() - peek_id, peek_value, peek_pos = self.token_stream.peek() - if peek_id is Token.DOT: + peek = self.token_stream.peek() + if peek.id is TokenTypes.DOT: self.token_stream.pop() - peek_id, peek_value, peek_pos = self.token_stream.peek() - elif peek_id is Token.POPEN: + peek = self.token_stream.peek() + elif peek.id is TokenTypes.POPEN: # TODO parse field query - raise ExtractorError('Field query is not yet supported at %d' % peek_pos) + raise ExtractorError('Field query is not yet supported at %d' % peek.pos) - if peek_id is Token.ID: + if peek.id is TokenTypes.ID: self.token_stream.pop() - return (Token.FIELD, peek_value, self._member_tail(stack_top - 1)) + return (TokenTypes.FIELD, peek.value, self._member_tail(stack_top - 1)) else: - raise ExtractorError('Identifier name expected at %d' % peek_pos) - elif peek_id is Token.SOPEN: + raise ExtractorError('Identifier name expected at %d' % peek.pos) + elif peek.id is TokenTypes.SOPEN: self.token_stream.pop() index = self._expression(stack_top - 1) - token_id, token_value, token_pos = self.token_stream.pop() - if token_id is Token.SCLOSE: - return (Token.ELEM, index, self._member_tail(stack_top - 1)) + token = self.token_stream.pop() + if token.id is TokenTypes.SCLOSE: + return (TokenTypes.ELEM, index, self._member_tail(stack_top - 1)) else: - raise ExtractorError('Unexpected sequence at %d' % token_pos) - elif peek_id is Token.POPEN: + raise ExtractorError('Unexpected sequence at %d' % token.pos) + elif peek.id is TokenTypes.POPEN: args = self._arguments(stack_top - 1) - return (Token.CALL, args, self._member_tail(stack_top - 1)) + return (TokenTypes.CALL, args, self._member_tail(stack_top - 1)) else: return None @@ -484,102 +485,102 @@ class Parser(object): raise ExtractorError('Recursion limit reached') # TODO support let - peek_id, peek_value, peek_pos = self.token_stream.peek() - if peek_id in token_keys: - if peek_id is Token.ID: + peek = self.token_stream.peek() + if peek.id in token_keys: + if peek.id is TokenTypes.ID: # this - if peek_value == 'this': + if peek.value == 'this': self.token_stream.pop() - return (Token.RSV, 'this') + return (TokenTypes.RSV, 'this') # function expr - elif peek_value == 'function': + elif peek.value == 'function': return self._function(stack_top - 1, True) # id else: self.token_stream.chk_id() self.token_stream.pop() - return (Token.ID, peek_value) + return (TokenTypes.ID, peek.value) # literals else: self.token_stream.pop() - return (peek_id, peek_value) + return (peek.id, peek.value) 
# array - elif peek_id is Token.SOPEN: + elif peek.id is TokenTypes.SOPEN: return self._array_literal(stack_top - 1) # object - elif peek_id is Token.COPEN: + elif peek.id is TokenTypes.COPEN: return self._object_literal(stack_top) # expr - elif peek_id is Token.POPEN: + elif peek.id is TokenTypes.POPEN: self.token_stream.pop() - open_pos = peek_pos + open_pos = peek.pos expr = self._expression(stack_top - 1) - peek_id, peek_value, peek_pos = self.token_stream.peek() - if peek_id is not Token.PCLOSE: + peek = self.token_stream.peek() + if peek.id is not TokenTypes.PCLOSE: raise ExtractorError('Unbalanced parentheses at %d' % open_pos) self.token_stream.pop() return expr else: - raise ExtractorError('Syntax error at %d' % peek_pos) + raise ExtractorError('Syntax error at %d' % peek.pos) def _function(self, stack_top, is_expr=False): if stack_top < 0: raise ExtractorError('Recursion limit reached') self.token_stream.pop() - token_id, token_value, token_pos = self.token_stream.peek() + token = self.token_stream.peek() name = None - if token_id is Token.ID: + if token.id is TokenTypes.ID: self.token_stream.chk_id() - token_id, name, token_pos = self.token_stream.pop() - token_id, token_value, token_pos = self.token_stream.peek() + name = self.token_stream.pop().value + token = self.token_stream.peek() elif not is_expr: - raise ExtractorError('Function declaration at %d is missing identifier' % token_pos) + raise ExtractorError('Function declaration at %d is missing identifier' % token.pos) - if token_id is not Token.POPEN: - raise ExtractorError('Expected argument list at %d' % token_pos) + if token.id is not TokenTypes.POPEN: + raise ExtractorError('Expected argument list at %d' % token.pos) # args self.token_stream.pop() - open_pos = token_pos + open_pos = token.pos args = [] while True: - token_id, token_value, token_pos = self.token_stream.peek() - if token_id is Token.PCLOSE: + token = self.token_stream.peek() + if token.id is TokenTypes.PCLOSE: self.token_stream.pop() break self.token_stream.chk_id() self.token_stream.pop() - args.append(token_value) - token_id, token_value, token_pos = self.token_stream.peek() - if token_id is Token.COMMA: + args.append(token.value) + token = self.token_stream.peek() + if token.id is TokenTypes.COMMA: self.token_stream.pop() - elif token_id is Token.PCLOSE: + elif token.id is TokenTypes.PCLOSE: pass - elif token_id is Token.END and self.token_stream.ended: + elif token.id is TokenTypes.END and self.token_stream.ended: raise ExtractorError('Unbalanced parentheses at %d' % open_pos) else: - raise ExtractorError('Expected , separator at %d' % token_pos) + raise ExtractorError('Expected , separator at %d' % token.pos) - token_id, token_value, token_pos = self.token_stream.peek() - if token_id is not Token.COPEN: - raise ExtractorError('Expected function body at %d' % token_pos) + token = self.token_stream.peek() + if token.id is not TokenTypes.COPEN: + raise ExtractorError('Expected function body at %d' % token.pos) - return (Token.FUNC, name, args, (self._function_body(stack_top - 1))) + return (TokenTypes.FUNC, name, args, (self._function_body(stack_top - 1))) def _function_body(self, stack_top): if stack_top < 0: raise ExtractorError('Recursion limit reached') - token_id, token_value, open_pos = self.token_stream.pop() + open_pos = self.token_stream.pop().pos body = [] while True: - token_id, token_value, token_pos = self.token_stream.peek() - if token_id is Token.CCLOSE: + token = self.token_stream.peek() + if token.id is TokenTypes.CCLOSE: 
self.token_stream.pop() break - elif token_id is Token.END and self.token_stream.ended: + elif token.id is TokenTypes.END and self.token_stream.ended: raise ExtractorError('Unbalanced parentheses at %d' % open_pos) body.append(self._source_element(stack_top - 1)) @@ -589,133 +590,133 @@ class Parser(object): if stack_top < 0: raise ExtractorError('Recursion limit reached') - peek_id, peek_value, peek_pos = self.token_stream.peek() - if peek_id is Token.POPEN: + peek = self.token_stream.peek() + if peek.id is TokenTypes.POPEN: self.token_stream.pop() - open_pos = peek_pos + open_pos = peek.pos else: return None args = [] while True: - peek_id, peek_value, peek_pos = self.token_stream.peek() - if peek_id is Token.PCLOSE: + peek = self.token_stream.peek() + if peek.id is TokenTypes.PCLOSE: self.token_stream.pop() return args # FIXME handle infor args.append(self._assign_expression(stack_top - 1)) # TODO parse generator expression - peek_id, peek_value, peek_pos = self.token_stream.peek() + peek = self.token_stream.peek() - if peek_id is Token.COMMA: + if peek.id is TokenTypes.COMMA: self.token_stream.pop() - elif peek_id is Token.PCLOSE: + elif peek.id is TokenTypes.PCLOSE: pass - elif peek_id is Token.END and self.token_stream.ended: + elif peek.id is TokenTypes.END and self.token_stream.ended: raise ExtractorError('Unbalanced parentheses at %d' % open_pos) else: - raise ExtractorError('''Expected ',' separator at %d''' % peek_pos) + raise ExtractorError('''Expected ',' separator at %d''' % peek.pos) def _array_literal(self, stack_top): if stack_top < 0: raise ExtractorError('Recursion limit reached') # XXX check no linebreak here - peek_id, peek_value, peek_pos = self.token_stream.peek() - if peek_id is not Token.SOPEN: - raise ExtractorError('Array expected at %d' % peek_pos) + peek = self.token_stream.peek() + if peek.id is not TokenTypes.SOPEN: + raise ExtractorError('Array expected at %d' % peek.pos) self.token_stream.pop() elements = [] has_another = True while has_another: - peek_id, peek_value, peek_pos = self.token_stream.peek() - if peek_id is Token.COMMA: + peek = self.token_stream.peek() + if peek.id is TokenTypes.COMMA: self.token_stream.pop() elements.append(None) - elif peek_id is Token.SCLOSE: + elif peek.id is TokenTypes.SCLOSE: self.token_stream.pop() has_another = False - elif peek_id is Token.ID and peek_value == 'for': + elif peek.id is TokenTypes.ID and peek.value == 'for': # TODO parse array comprehension - raise ExtractorError('Array comprehension is not yet supported at %d' % peek_pos) + raise ExtractorError('Array comprehension is not yet supported at %d' % peek.pos) else: elements.append(self._assign_expression(stack_top - 1)) - peek_id, peek_value, peek_pos = self.token_stream.pop() - if peek_id is Token.SCLOSE: + peek = self.token_stream.pop() + if peek.id is TokenTypes.SCLOSE: has_another = False - elif peek_id is not Token.COMMA: - raise ExtractorError('''Expected ',' after element at %d''' % peek_pos) + elif peek.id is not TokenTypes.COMMA: + raise ExtractorError('''Expected ',' after element at %d''' % peek.pos) - return (Token.ARRAY, elements) + return (TokenTypes.ARRAY, elements) def _object_literal(self, stack_top): if stack_top < 0: raise ExtractorError('Recursion limit reached') - token_id, token_value, open_pos = self.token_stream.pop() + open_pos = self.token_stream.pop().pos property_list = [] while True: - token_id, token_value, token_pos = self.token_stream.pop() - if token_id is Token.CCLOSE: + token = self.token_stream.pop() + if token.id is 
TokenTypes.CCLOSE: break - elif token_id is Token.COMMA: + elif token.id is TokenTypes.COMMA: continue - elif token_id is Token.ID and token_value in ('get', 'set'): - is_set = token_id is Token.ID and token_value == 'set' + elif token.id is TokenTypes.ID and token.value in ('get', 'set'): + is_set = token.id is TokenTypes.ID and token.value == 'set' - token_id, token_value, token_pos = self.token_stream.pop() - if token_id not in (Token.ID, Token.STR, Token.INT, Token.FLOAT): - raise ExtractorError('Property name is expected at %d' % token_pos) - property_name = token_value - token_id, token_value, token_pos = self.token_stream.pop() - if token_id is not Token.POPEN: - raise ExtractorError('''Expected '(' at %d''' % token_pos) + token = self.token_stream.pop() + if token.id not in (TokenTypes.ID, TokenTypes.STR, TokenTypes.INT, TokenTypes.FLOAT): + raise ExtractorError('Property name is expected at %d' % token.pos) + property_name = token.value + token = self.token_stream.pop() + if token.id is not TokenTypes.POPEN: + raise ExtractorError('''Expected '(' at %d''' % token.pos) if is_set: self.token_stream.chk_id() - token_id, arg, token_pos = self.token_stream.pop() + arg = self.token_stream.pop().value - token_id, token_value, token_pos = self.token_stream.pop() - if token_id is not Token.PCLOSE: - raise ExtractorError('''Expected ')' at %d''' % token_pos) + token = self.token_stream.pop() + if token.id is not TokenTypes.PCLOSE: + raise ExtractorError('''Expected ')' at %d''' % token.pos) if is_set: - desc = (Token.PROPSET, arg, self._function_body(stack_top - 1)) + desc = (TokenTypes.PROPSET, arg, self._function_body(stack_top - 1)) else: - desc = (Token.PROPGET, self._function_body(stack_top - 1)) + desc = (TokenTypes.PROPGET, self._function_body(stack_top - 1)) - elif token_id in (Token.ID, Token.STR, Token.INT, Token.FLOAT): - property_name = token_value - token_id, token_value, token_pos = self.token_stream.pop() - if token_id is not Token.COLON: - raise ExtractorError('Property name is expected at %d' % token_pos) + elif token.id in (TokenTypes.ID, TokenTypes.STR, TokenTypes.INT, TokenTypes.FLOAT): + property_name = token.value + token = self.token_stream.pop() + if token.id is not TokenTypes.COLON: + raise ExtractorError('Property name is expected at %d' % token.pos) - desc = (Token.PROPVALUE, self._assign_expression(stack_top - 1)) + desc = (TokenTypes.PROPVALUE, self._assign_expression(stack_top - 1)) elif self.token_stream.ended: raise ExtractorError('Unmatched parentheses at %d' % open_pos) else: - raise ExtractorError('Property assignment is expected at %d' % token_pos) + raise ExtractorError('Property assignment is expected at %d' % token.pos) property_list.append((property_name, desc)) - return (Token.OBJECT, property_list) + return (TokenTypes.OBJECT, property_list) def _conditional_expression(self, stack_top): if stack_top < 0: raise ExtractorError('Recursion limit reached') expr = self._operator_expression(stack_top - 1) - peek_id, peek_value, peek_pos = self.token_stream.peek() - if peek_id is Token.HOOK: - hook_pos = peek_pos + peek = self.token_stream.peek() + if peek.id is TokenTypes.HOOK: + hook_pos = peek.pos true_expr = self._assign_expression(stack_top - 1) - peek_id, peek_value, peek_pos = self.token_stream.peek() - if peek_id is Token.COLON: + peek = self.token_stream.peek() + if peek.id is TokenTypes.COLON: false_expr = self._assign_expression(stack_top - 1) else: raise ExtractorError('Missing : in conditional expression at %d' % hook_pos) - return (Token.COND, 
expr, true_expr, false_expr) + return (TokenTypes.COND, expr, true_expr, false_expr) return expr def _operator_expression(self, stack_top): @@ -753,71 +754,73 @@ class Parser(object): had_inc = False has_prefix = True while has_prefix: - peek_id, peek_value, peek_pos = self.token_stream.peek() - if peek_id is Token.OP and peek_value[0] in (Token.ADD, Token.SUB): + token = self.token_stream.peek() + peek_id = token.id + peek_value = token.value + if peek_id is TokenTypes.OP and peek_value[0] in (TokenTypes.ADD, TokenTypes.SUB): # any binary operators will be consumed later - peek_id = Token.UOP + peek_id = TokenTypes.UOP peek_value = convert_to_unary(peek_value) - if peek_id is Token.UOP: + if peek_id is TokenTypes.UOP: name, op = peek_value - had_inc = name in (Token.INC, Token.DEC) + had_inc = name in (TokenTypes.INC, TokenTypes.DEC) if had_inc: - peek_id = Token.PREFIX + peek_id = TokenTypes.PREFIX while stack and stack[-1][0] > 16: _, stack_id, stack_op = stack.pop() out.append((stack_id, stack_op)) stack.append((16, peek_id, op)) self.token_stream.pop() - peek_id, peek_value, peek_pos = self.token_stream.peek() - if had_inc and peek_id is not Token.ID: - raise ExtractorError('Prefix operator has to be followed by an identifier at %d' % peek_pos) - has_prefix = peek_id is Token.UOP + token = self.token_stream.peek() + if had_inc and token.id is not TokenTypes.ID: + raise ExtractorError('Prefix operator has to be followed by an identifier at %d' % token.pos) + has_prefix = token.id is TokenTypes.UOP else: has_prefix = False left = self._member_expression(stack_top - 1) out.append(left) - peek_id, peek_value, peek_pos = self.token_stream.peek() + token = self.token_stream.peek() # postfix - if peek_id is Token.UOP: + if token.id is TokenTypes.UOP: if had_inc: - raise ExtractorError('''Can't have prefix and postfix operator at the same time at %d''' % peek_pos) - name, op = peek_value - if name in (Token.INC, Token.DEC): - peek_id = Token.POSTFIX + raise ExtractorError('''Can't have prefix and postfix operator at the same time at %d''' % token.pos) + name, op = token.value + if name in (TokenTypes.INC, TokenTypes.DEC): + peek_id = TokenTypes.POSTFIX prec = 17 else: - raise ExtractorError('Unexpected operator at %d' % peek_pos) + raise ExtractorError('Unexpected operator at %d' % token.pos) while stack and stack[-1][0] >= 17: _, stack_id, stack_op = stack.pop() out.append((stack_id, stack_op)) stack.append((prec, peek_id, op)) self.token_stream.pop() - peek_id, peek_value, peek_pos = self.token_stream.peek() + token = self.token_stream.peek() - if peek_id is Token.REL: - name, op = peek_value + if token.id is TokenTypes.REL: + name, op = token.value prec = 11 - elif peek_id is Token.OP: - name, op = peek_value - if name in (Token.MUL, Token.DIV, Token.MOD): + elif token.id is TokenTypes.OP: + name, op = token.value + if name in (TokenTypes.MUL, TokenTypes.DIV, TokenTypes.MOD): prec = 14 - elif name in (Token.ADD, Token.SUB): + elif name in (TokenTypes.ADD, TokenTypes.SUB): prec = 13 - elif name in (Token.RSHIFT, Token.LSHIFT, Token.URSHIFT): + elif name in (TokenTypes.RSHIFT, TokenTypes.LSHIFT, TokenTypes.URSHIFT): prec = 12 - elif name is Token.BAND: + elif name is TokenTypes.BAND: prec = 9 - elif name is Token.BXOR: + elif name is TokenTypes.BXOR: prec = 8 - elif name is Token.BOR: + elif name is TokenTypes.BOR: prec = 7 else: - raise ExtractorError('Unexpected operator at %d' % peek_pos) - elif peek_id is Token.LOP: - name, op = peek_value - prec = {Token.OR: 5, Token.AND: 6}[name] + 
raise ExtractorError('Unexpected operator at %d' % token.pos) + elif token.id is TokenTypes.LOP: + name, op = token.value + prec = {TokenTypes.OR: 5, TokenTypes.AND: 6}[name] else: op = None prec = 4 # empties stack @@ -828,7 +831,7 @@ class Parser(object): if op is None: break else: - stack.append((prec, peek_id, op)) + stack.append((prec, token.id, op)) self.token_stream.pop() - return (Token.OPEXPR, out) + return (TokenTypes.OPEXPR, out) diff --git a/youtube_dl/jsinterp2/tstream.py b/youtube_dl/jsinterp2/tstream.py index f615864f8..8572cca9f 100644 --- a/youtube_dl/jsinterp2/tstream.py +++ b/youtube_dl/jsinterp2/tstream.py @@ -5,82 +5,83 @@ import operator from ..utils import ExtractorError from .jsgrammar import ( + ASSIGN_OPERATORS_RE, COMMENT_RE, + LINETERMINATORSEQ_RE, + LOGICAL_OPERATORS_RE, + OPERATORS_RE, TOKENS_RE, PUNCTUATIONS_RE, - LOGICAL_OPERATORS_RE, - UNARY_OPERATORS_RE, RELATIONS_RE, - ASSIGN_OPERATORS_RE, - OPERATORS_RE, - Token + UNARY_OPERATORS_RE, + TokenTypes ) _PUNCTUATIONS = { - '{': Token.COPEN, - '}': Token.CCLOSE, - '(': Token.POPEN, - ')': Token.PCLOSE, - '[': Token.SOPEN, - ']': Token.SCLOSE, - '.': Token.DOT, - ';': Token.END, - ',': Token.COMMA, - '?': Token.HOOK, - ':': Token.COLON + '{': TokenTypes.COPEN, + '}': TokenTypes.CCLOSE, + '(': TokenTypes.POPEN, + ')': TokenTypes.PCLOSE, + '[': TokenTypes.SOPEN, + ']': TokenTypes.SCLOSE, + '.': TokenTypes.DOT, + ';': TokenTypes.END, + ',': TokenTypes.COMMA, + '?': TokenTypes.HOOK, + ':': TokenTypes.COLON } _LOGICAL_OPERATORS = { - '&&': (Token.AND, lambda cur, right: cur and right), - '||': (Token.OR, lambda cur, right: cur or right) + '&&': (TokenTypes.AND, lambda cur, right: cur and right), + '||': (TokenTypes.OR, lambda cur, right: cur or right) } _UNARY_OPERATORS = { - '+': (Token.PLUS, lambda cur: cur), - '-': (Token.NEG, lambda cur: cur * -1), - '++': (Token.INC, lambda cur: cur + 1), - '--': (Token.DEC, lambda cur: cur - 1), - '!': (Token.NOT, operator.not_), - '~': (Token.BNOT, operator.inv), + '+': (TokenTypes.PLUS, lambda cur: cur), + '-': (TokenTypes.NEG, lambda cur: cur * -1), + '++': (TokenTypes.INC, lambda cur: cur + 1), + '--': (TokenTypes.DEC, lambda cur: cur - 1), + '!': (TokenTypes.NOT, operator.not_), + '~': (TokenTypes.BNOT, operator.inv), # XXX define these operators - 'delete': (Token.DEL, None), - 'void': (Token.VOID, None), - 'typeof': (Token.TYPE, lambda cur: type(cur)) + 'delete': (TokenTypes.DEL, None), + 'void': (TokenTypes.VOID, None), + 'typeof': (TokenTypes.TYPE, lambda cur: type(cur)) } _RELATIONS = { - '<': (Token.LT, operator.lt), - '>': (Token.GT, operator.gt), - '<=': (Token.LE, operator.le), - '>=': (Token.GE, operator.ge), + '<': (TokenTypes.LT, operator.lt), + '>': (TokenTypes.GT, operator.gt), + '<=': (TokenTypes.LE, operator.le), + '>=': (TokenTypes.GE, operator.ge), # XXX check python and JavaScript equality difference - '==': (Token.EQ, operator.eq), - '!=': (Token.NE, operator.ne), - '===': (Token.SEQ, lambda cur, right: cur == right and type(cur) == type(right)), - '!==': (Token.SNE, lambda cur, right: not cur == right or not type(cur) == type(right)), - 'in': (Token.IN, operator.contains), - 'instanceof': (Token.INSTANCEOF, lambda cur, right: isinstance(cur, right)) + '==': (TokenTypes.EQ, operator.eq), + '!=': (TokenTypes.NE, operator.ne), + '===': (TokenTypes.SEQ, lambda cur, right: cur == right and type(cur) == type(right)), + '!==': (TokenTypes.SNE, lambda cur, right: not cur == right or not type(cur) == type(right)), + 'in': (TokenTypes.IN, operator.contains), + 
'instanceof': (TokenTypes.INSTANCEOF, lambda cur, right: isinstance(cur, right)) } _OPERATORS = { - '|': (Token.BOR, operator.or_), - '^': (Token.BXOR, operator.xor), - '&': (Token.BAND, operator.and_), + '|': (TokenTypes.BOR, operator.or_), + '^': (TokenTypes.BXOR, operator.xor), + '&': (TokenTypes.BAND, operator.and_), # NOTE convert to int before shift float - '>>': (Token.RSHIFT, operator.rshift), - '<<': (Token.LSHIFT, operator.lshift), - '>>>': (Token.URSHIFT, lambda cur, right: cur >> right if cur >= 0 else (cur + 0x100000000) >> right), - '-': (Token.SUB, operator.sub), - '+': (Token.ADD, operator.add), - '%': (Token.MOD, operator.mod), - '/': (Token.DIV, operator.truediv), - '*': (Token.MUL, operator.mul) + '>>': (TokenTypes.RSHIFT, operator.rshift), + '<<': (TokenTypes.LSHIFT, operator.lshift), + '>>>': (TokenTypes.URSHIFT, lambda cur, right: cur >> right if cur >= 0 else (cur + 0x100000000) >> right), + '-': (TokenTypes.SUB, operator.sub), + '+': (TokenTypes.ADD, operator.add), + '%': (TokenTypes.MOD, operator.mod), + '/': (TokenTypes.DIV, operator.truediv), + '*': (TokenTypes.MUL, operator.mul) } _ASSIGN_OPERATORS = dict((op + '=', ('set_%s' % token[0], token[1])) for op, token in _OPERATORS.items()) _ASSIGN_OPERATORS['='] = ('set', lambda cur, right: right) _operator_lookup = { - Token.OP: _OPERATORS, - Token.AOP: _ASSIGN_OPERATORS, - Token.UOP: _UNARY_OPERATORS, - Token.LOP: _LOGICAL_OPERATORS, - Token.REL: _RELATIONS + TokenTypes.OP: _OPERATORS, + TokenTypes.AOP: _ASSIGN_OPERATORS, + TokenTypes.UOP: _UNARY_OPERATORS, + TokenTypes.LOP: _LOGICAL_OPERATORS, + TokenTypes.REL: _RELATIONS } # only to check ids _reserved_words = ('break', 'case', 'catch', 'continue', 'debugger', 'default', 'delete', 'do', 'else', 'finally', @@ -97,9 +98,21 @@ _input_element = re.compile(r'\s*(?:%(comment)s|%(token)s|%(lop)s|%(uop)s|%(aop) 'punct': PUNCTUATIONS_RE }) +_line_terminator = re.compile(LINETERMINATORSEQ_RE) + def convert_to_unary(token_value): - return {Token.ADD: _UNARY_OPERATORS['+'], Token.SUB: _UNARY_OPERATORS['-']}[token_value[0]] + return {TokenTypes.ADD: _UNARY_OPERATORS['+'], TokenTypes.SUB: _UNARY_OPERATORS['-']}[token_value[0]] + + +class Token(object): + def __init__(self, token_type, token_value, pos, line, at): + super(Token, self).__init__() + self.id = token_type + self.value = token_value + self.pos = pos + self.line = line + self.at = at class TokenStream(object): @@ -110,6 +123,7 @@ class TokenStream(object): self.peeked = [] self._ts = self._next_token(start) self._last = None + self._line = 1 + len(_line_terminator.findall(self.code[:start])) def _next_token(self, pos=0): while not self.ended: @@ -118,35 +132,43 @@ class TokenStream(object): token_id = feed_m.lastgroup token_value = feed_m.group(token_id) pos = feed_m.start(token_id) - token_id = Token[Token.index(token_id)] + token_id = TokenTypes[TokenTypes.index(token_id)] + + # TODO use line report insteadof position + lt_count, lt_match = 0, None + for lt_count, lt_match in enumerate(_line_terminator.finditer(token_value)): pass + lt_last = pos if lt_match is None else pos + lt_match.start() + at = pos - lt_last + self._line += lt_count + self.ended = feed_m.end() >= len(self.code) # because how yield works - if token_id is Token.COMMENT: + if token_id is TokenTypes.COMMENT: pass # TODO date - elif token_id is Token.NULL: - yield (token_id, None, pos) - elif token_id is Token.BOOL: - yield (token_id, {'true': True, 'false': False}[token_value], pos) - elif token_id is Token.STR: - yield (token_id, 
token_value[1:-1], pos) - elif token_id is Token.INT: + elif token_id is TokenTypes.NULL: + yield Token(token_id, None, pos, self._line, at) + elif token_id is TokenTypes.BOOL: + yield Token(token_id, {'true': True, 'false': False}[token_value], pos, self._line, at) + elif token_id is TokenTypes.STR: + yield Token(token_id, token_value[1:-1], pos, self._line, at) + elif token_id is TokenTypes.INT: root = ((16 if len(token_value) > 2 and token_value[1] in 'xX' else 8) if token_value.startswith('0') else 10) - yield (token_id, int(token_value, root), pos) - elif token_id is Token.FLOAT: - yield (token_id, float(token_value), pos) - elif token_id is Token.REGEX: + yield Token(token_id, int(token_value, root), pos, self._line, at) + elif token_id is TokenTypes.FLOAT: + yield Token(token_id, float(token_value), pos, self._line, at) + elif token_id is TokenTypes.REGEX: # TODO error handling regex = re.compile(feed_m.group('rebody')) - yield (token_id, (regex, feed_m.group('reflags')), pos) - elif token_id is Token.ID: - yield (token_id, token_value, pos) + yield Token(token_id, (regex, feed_m.group('reflags')), pos, self._line, at) + elif token_id is TokenTypes.ID: + yield Token(token_id, token_value, pos, self._line, at) elif token_id in _operator_lookup: - yield (token_id if token_value != 'in' else Token.IN, - _operator_lookup[token_id][token_value], - pos) - elif token_id is Token.PUNCT: - yield (_PUNCTUATIONS[token_value], token_value, pos) + yield Token(token_id if token_value != 'in' else TokenTypes.IN, + _operator_lookup[token_id][token_value], + pos, self._line, at) + elif token_id is TokenTypes.PUNCT: + yield Token(_PUNCTUATIONS[token_value], token_value, pos, self._line, at) else: raise ExtractorError('Unexpected token at %d' % pos) pos = feed_m.end() @@ -157,17 +179,24 @@ class TokenStream(object): def chk_id(self, last=False): if last: - name, value, pos = self._last + token = self._last else: - name, value, pos = self.peek() - if name is not Token.ID or value in _reserved_words: - raise ExtractorError('Invalid identifier at %d' % pos) + token = self.peek() + if token.id is not TokenTypes.ID or token.value in _reserved_words: + raise ExtractorError('Invalid identifier at %d' % token.pos) def peek(self, count=1): for _ in range(count - len(self.peeked)): token = next(self._ts, None) if token is None: - self.peeked.append((Token.END, ';', len(self.code))) + pos = len(self.code) + + lt_count, lt_match = 0, None + for lt_count, lt_match in enumerate(_line_terminator.finditer(self.code)): pass + lt_last = pos if lt_match is None else pos + lt_match.start() + at = pos - lt_last + + self.peeked.append(Token(TokenTypes.END, ';', pos, self._line, at)) else: self.peeked.append(token) return self.peeked[count - 1]
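
Notes follow on the API this patch introduces; each code block is a hedged sketch, not code from the tree.

The rename from Token to TokenTypes leaves the underlying trick intact: a namedtuple type is instantiated with its own field names, so every attribute holds the string equal to its name. Those strings double as named groups in TOKENS_RE, and _next_token maps a match's lastgroup back to the canonical member so the parser can compare with 'is' rather than '=='. A minimal sketch (the abridged _token_names is an assumption; the real tuple lists every token kind):

    from collections import namedtuple

    _token_names = ('COPEN', 'CCLOSE', 'ID', 'COMMENT', 'END')  # abridged stand-in
    TokenTypes = namedtuple('Token', _token_names)._make(_token_names)

    assert TokenTypes.ID == 'ID'  # every field holds its own name
    # what _next_token does with feed_m.lastgroup: normalize the group name
    # back to the canonical member, making 'is' comparisons safe downstream
    lastgroup = 'ID'
    token_id = TokenTypes[TokenTypes.index(lastgroup)]
    assert token_id is TokenTypes.ID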
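With the old name free, tstream.py can introduce a real Token object, and the jsparser.py hunks become mechanical: every 'token_id, token_value, token_pos = self.token_stream.pop()' unpacking turns into a single 'token = self.token_stream.pop()' with attribute access. Error messages still format token.pos; the new line and at attributes are what the '# TODO use line report insteadof position' comment plans to use instead. A stand-in mirroring the added class, with invented demo values:

    # mirrors tstream.Token as added by this patch
    class Token(object):
        def __init__(self, token_type, token_value, pos, line, at):
            super(Token, self).__init__()
            self.id = token_type      # a TokenTypes member
            self.value = token_value  # decoded payload: str, int, float, regex, ...
            self.pos = pos            # absolute offset in the source
            self.line = line          # 1-based line number
            self.at = at              # offset of the token within its line

    tok = Token('ID', 'function', pos=42, line=3, at=7)  # hypothetical values
    if tok.id == 'ID' and tok.value == 'function':
        print('function keyword at line %d, offset %d' % (tok.line, tok.at))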
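The line numbers come from the new LINETERMINATORSEQ_RE, r'\n|\r(?!\n)|\u2028|\u2029', which counts a CRLF pair once because \r only matches when not followed by \n, matching ECMAScript's LineTerminatorSequence production. Below is a self-contained sketch of the line/column arithmetic TokenStream keeps in _line and at; it recomputes from the start of the source instead of updating incrementally, and line_and_column is an illustrative name, not part of the patch. One caveat worth checking against _next_token: enumerate() starts at zero, so a terminator count read off the loop variable is one short whenever at least one match was found.

    from __future__ import unicode_literals
    import re

    # copied from jsgrammar.py as added by this patch
    LINETERMINATORSEQ_RE = r'\n|\r(?!\n)|\u2028|\u2029'
    _line_terminator = re.compile(LINETERMINATORSEQ_RE)

    def line_and_column(code, pos):
        # 1-based line: one more than the number of terminators before pos
        head = code[:pos]
        line = 1 + len(_line_terminator.findall(head))
        # 0-based column: distance from the end of the last terminator, if any
        last = None
        for last in _line_terminator.finditer(head):
            pass
        col = pos if last is None else pos - last.end()
        return line, col

    print(line_and_column('var a;\r\nvar b;', 10))  # -> (2, 2): CRLF counted once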