From eaffeaedd2949f87b8493dd959975e867849cc92 Mon Sep 17 00:00:00 2001
From: Donne Martin
Date: Sat, 6 Apr 2019 12:57:05 -0400
Subject: [PATCH] Revert "Add Python 3.7 support (#159)"

This reverts commit 62c2ccad893ec8293d4fd46999e0a54e12cac40f.
---
 setup.py | 11 +-
 tests/test_completer.py | 16 +-
 tox.ini | 2 +-
 xonsh/__init__.py | 105 +-
 xonsh/__main__.py | 3 -
 xonsh/aliases.py | 919 +----
 xonsh/ansi_colors.py | 1095 ------
 xonsh/ast.py | 533 +--
 xonsh/base_shell.py | 568 +--
 xonsh/built_ins.py | 1929 +++------
 xonsh/codecache.py | 209 -
 xonsh/color_tools.py | 419 --
 xonsh/commands_cache.py | 441 ---
 xonsh/completer.py | 426 +-
 xonsh/completers/__init__.py | 45 -
 xonsh/completers/_aliases.py | 190 -
 xonsh/completers/base.py | 31 -
 xonsh/completers/bash.py | 23 -
 xonsh/completers/bash_completion.py | 453 ---
 xonsh/completers/commands.py | 64 -
 xonsh/completers/completer.py | 35 -
 xonsh/completers/dirs.py | 26 -
 xonsh/completers/init.py | 39 -
 xonsh/completers/man.py | 57 -
 xonsh/completers/path.py | 326 --
 xonsh/completers/pip.py | 56 -
 xonsh/completers/python.py | 290 --
 xonsh/completers/tools.py | 33 -
 xonsh/completers/xompletions.py | 48 -
 xonsh/contexts.py | 117 -
 xonsh/data/data.txt | 348 --
 xonsh/diff_history.py | 329 +-
 xonsh/dirstack.py | 528 +--
 xonsh/dumb_shell.py | 12 -
 xonsh/environ.py | 2012 +++-------
 xonsh/events.py | 347 --
 xonsh/execer.py | 283 +-
 xonsh/foreign_shells.py | 697 +---
 xonsh/fs.py | 98 -
 xonsh/history.py | 392 ++
 xonsh/history/__init__.py | 27 -
 xonsh/history/base.py | 155 -
 xonsh/history/dummy.py | 23 -
 xonsh/history/json.py | 436 --
 xonsh/history/main.py | 417 --
 xonsh/history/sqlite.py | 240 --
 xonsh/imphooks.py | 264 +-
 xonsh/inspectors.py | 401 +-
 xonsh/jobs.py | 489 +--
 xonsh/jsonutils.py | 19 -
 xonsh/jupyter_kernel.py | 478 ---
 xonsh/jupyter_shell.py | 144 -
 xonsh/lazyasd.py | 351 --
 xonsh/lazyimps.py | 85 -
 xonsh/lazyjson.py | 119 +-
 xonsh/lexer.py | 714 ++--
 xonsh/macutils.py | 22 -
 xonsh/main.py | 595 +--
 xonsh/openpy.py | 159 +-
 xonsh/parser.py | 2520 +++++++++++-
 xonsh/parsers/__init__.py | 0
 xonsh/parsers/base.py | 3292 ----------------
 xonsh/parsers/context_check.py | 85 -
 xonsh/parsers/v34.py | 157 -
 xonsh/parsers/v35.py | 148 -
 xonsh/parsers/v36.py | 43 -
 xonsh/platform.py | 631 ---
 xonsh/ply/.gitignore | 9 -
 xonsh/ply/.travis.yml | 10 -
 xonsh/ply/CHANGES | 1426 -------
 xonsh/ply/CONTRIBUTING.md | 18 -
 xonsh/ply/Makefile | 17 -
 xonsh/ply/README.md | 274 --
 xonsh/ply/__init__.py | 0
 xonsh/ply/doc/internal.html | 874 ----
 xonsh/ply/doc/makedoc.py | 194 -
 xonsh/ply/doc/ply.html | 3496 ----------------
 xonsh/ply/example/BASIC/README | 79 -
 xonsh/ply/example/BASIC/basic.py | 66 -
 xonsh/ply/example/BASIC/basiclex.py | 61 -
 xonsh/ply/example/BASIC/basiclog.py | 74 -
 xonsh/ply/example/BASIC/basinterp.py | 496 ---
 xonsh/ply/example/BASIC/basparse.py | 474 ---
 xonsh/ply/example/BASIC/dim.bas | 14 -
 xonsh/ply/example/BASIC/func.bas | 5 -
 xonsh/ply/example/BASIC/gcd.bas | 22 -
 xonsh/ply/example/BASIC/gosub.bas | 13 -
 xonsh/ply/example/BASIC/hello.bas | 4 -
 xonsh/ply/example/BASIC/linear.bas | 17 -
 xonsh/ply/example/BASIC/maxsin.bas | 12 -
 xonsh/ply/example/BASIC/powers.bas | 13 -
 xonsh/ply/example/BASIC/rand.bas | 4 -
 xonsh/ply/example/BASIC/sales.bas | 20 -
 xonsh/ply/example/BASIC/sears.bas | 18 -
 xonsh/ply/example/BASIC/sqrt1.bas | 5 -
 xonsh/ply/example/BASIC/sqrt2.bas | 4 -
 xonsh/ply/example/GardenSnake/GardenSnake.py | 777 ----
 xonsh/ply/example/GardenSnake/README | 5 -
 xonsh/ply/example/README | 10 -
 xonsh/ply/example/ansic/README | 2 -
 xonsh/ply/example/ansic/clex.py | 168 -
 xonsh/ply/example/ansic/cparse.py | 1048 -----
 xonsh/ply/example/calc/calc.py | 123 -
 xonsh/ply/example/calcdebug/calc.py | 129 -
 xonsh/ply/example/calceof/calc.py | 132 -
 xonsh/ply/example/classcalc/calc.py | 165 -
 xonsh/ply/example/cleanup.sh | 2 -
 xonsh/ply/example/closurecalc/calc.py | 132 -
 xonsh/ply/example/hedit/hedit.py | 48 -
 xonsh/ply/example/newclasscalc/calc.py | 167 -
 xonsh/ply/example/optcalc/README | 9 -
 xonsh/ply/example/optcalc/calc.py | 134 -
 xonsh/ply/example/unicalc/calc.py | 133 -
 xonsh/ply/example/yply/README | 41 -
 xonsh/ply/example/yply/ylex.py | 119 -
 xonsh/ply/example/yply/yparse.py | 244 --
 xonsh/ply/example/yply/yply.py | 51 -
 xonsh/ply/ply/__init__.py | 5 -
 xonsh/ply/ply/cpp.py | 974 -----
 xonsh/ply/ply/ctokens.py | 127 -
 xonsh/ply/ply/lex.py | 1099 ------
 xonsh/ply/ply/yacc.py | 3504 -----------------
 xonsh/ply/ply/ygen.py | 69 -
 xonsh/ply/setup.md | 40 -
 xonsh/ply/test/README | 8 -
 xonsh/ply/test/calclex.py | 49 -
 xonsh/ply/test/cleanup.sh | 4 -
 xonsh/ply/test/lex_closure.py | 54 -
 xonsh/ply/test/lex_doc1.py | 26 -
 xonsh/ply/test/lex_dup1.py | 29 -
 xonsh/ply/test/lex_dup2.py | 33 -
 xonsh/ply/test/lex_dup3.py | 31 -
 xonsh/ply/test/lex_empty.py | 20 -
 xonsh/ply/test/lex_error1.py | 24 -
 xonsh/ply/test/lex_error2.py | 26 -
 xonsh/ply/test/lex_error3.py | 27 -
 xonsh/ply/test/lex_error4.py | 27 -
 xonsh/ply/test/lex_hedit.py | 47 -
 xonsh/ply/test/lex_ignore.py | 31 -
 xonsh/ply/test/lex_ignore2.py | 29 -
 xonsh/ply/test/lex_literal1.py | 25 -
 xonsh/ply/test/lex_literal2.py | 25 -
 xonsh/ply/test/lex_literal3.py | 26 -
 xonsh/ply/test/lex_many_tokens.py | 27 -
 xonsh/ply/test/lex_module.py | 10 -
 xonsh/ply/test/lex_module_import.py | 42 -
 xonsh/ply/test/lex_object.py | 55 -
 xonsh/ply/test/lex_opt_alias.py | 54 -
 xonsh/ply/test/lex_optimize.py | 50 -
 xonsh/ply/test/lex_optimize2.py | 50 -
 xonsh/ply/test/lex_optimize3.py | 52 -
 xonsh/ply/test/lex_optimize4.py | 26 -
 xonsh/ply/test/lex_re1.py | 27 -
 xonsh/ply/test/lex_re2.py | 27 -
 xonsh/ply/test/lex_re3.py | 29 -
 xonsh/ply/test/lex_rule1.py | 27 -
 xonsh/ply/test/lex_rule2.py | 29 -
 xonsh/ply/test/lex_rule3.py | 27 -
 xonsh/ply/test/lex_state1.py | 40 -
 xonsh/ply/test/lex_state2.py | 40 -
 xonsh/ply/test/lex_state3.py | 42 -
 xonsh/ply/test/lex_state4.py | 41 -
 xonsh/ply/test/lex_state5.py | 40 -
 xonsh/ply/test/lex_state_noerror.py | 39 -
 xonsh/ply/test/lex_state_norule.py | 40 -
 xonsh/ply/test/lex_state_try.py | 45 -
 xonsh/ply/test/lex_token1.py | 19 -
 xonsh/ply/test/lex_token2.py | 22 -
 xonsh/ply/test/lex_token3.py | 24 -
 xonsh/ply/test/lex_token4.py | 26 -
 xonsh/ply/test/lex_token5.py | 31 -
 xonsh/ply/test/lex_token_dup.py | 29 -
 xonsh/ply/test/pkg_test1/__init__.py | 9 -
 xonsh/ply/test/pkg_test1/parsing/__init__.py | 0
 xonsh/ply/test/pkg_test1/parsing/calclex.py | 47 -
 xonsh/ply/test/pkg_test1/parsing/calcparse.py | 66 -
 xonsh/ply/test/pkg_test2/__init__.py | 9 -
 xonsh/ply/test/pkg_test2/parsing/__init__.py | 0
 xonsh/ply/test/pkg_test2/parsing/calclex.py | 47 -
 xonsh/ply/test/pkg_test2/parsing/calcparse.py | 66 -
 xonsh/ply/test/pkg_test3/__init__.py | 9 -
 .../ply/test/pkg_test3/generated/__init__.py | 0
 xonsh/ply/test/pkg_test3/parsing/__init__.py | 0
 xonsh/ply/test/pkg_test3/parsing/calclex.py | 47 -
 xonsh/ply/test/pkg_test3/parsing/calcparse.py | 66 -
 xonsh/ply/test/pkg_test4/__init__.py | 25 -
 xonsh/ply/test/pkg_test4/parsing/__init__.py | 0
 xonsh/ply/test/pkg_test4/parsing/calclex.py | 47 -
 xonsh/ply/test/pkg_test4/parsing/calcparse.py | 66 -
 xonsh/ply/test/pkg_test5/__init__.py | 9 -
 xonsh/ply/test/pkg_test5/parsing/__init__.py | 0
 xonsh/ply/test/pkg_test5/parsing/calclex.py | 48 -
 xonsh/ply/test/pkg_test5/parsing/calcparse.py | 67 -
 xonsh/ply/test/pkg_test6/__init__.py | 9 -
 xonsh/ply/test/pkg_test6/parsing/__init__.py | 0
 xonsh/ply/test/pkg_test6/parsing/calclex.py | 48 -
 xonsh/ply/test/pkg_test6/parsing/calcparse.py | 33 -
 .../ply/test/pkg_test6/parsing/expression.py | 31 -
 xonsh/ply/test/pkg_test6/parsing/statement.py | 9 -
 xonsh/ply/test/test_cpp_nonascii.c | 2 -
 xonsh/ply/test/testcpp.py | 153 -
 xonsh/ply/test/testlex.py | 682 ----
 xonsh/ply/test/testyacc.py | 452 ---
 xonsh/ply/test/yacc_badargs.py | 68 -
 xonsh/ply/test/yacc_badid.py | 77 -
 xonsh/ply/test/yacc_badprec.py | 64 -
 xonsh/ply/test/yacc_badprec2.py | 68 -
 xonsh/ply/test/yacc_badprec3.py | 68 -
 xonsh/ply/test/yacc_badrule.py | 68 -
 xonsh/ply/test/yacc_badtok.py | 68 -
 xonsh/ply/test/yacc_dup.py | 68 -
 xonsh/ply/test/yacc_error1.py | 68 -
 xonsh/ply/test/yacc_error2.py | 68 -
 xonsh/ply/test/yacc_error3.py | 67 -
 xonsh/ply/test/yacc_error4.py | 72 -
 xonsh/ply/test/yacc_error5.py | 94 -
 xonsh/ply/test/yacc_error6.py | 80 -
 xonsh/ply/test/yacc_error7.py | 80 -
 xonsh/ply/test/yacc_inf.py | 56 -
 xonsh/ply/test/yacc_literal.py | 69 -
 xonsh/ply/test/yacc_misplaced.py | 68 -
 xonsh/ply/test/yacc_missing1.py | 68 -
 xonsh/ply/test/yacc_nested.py | 33 -
 xonsh/ply/test/yacc_nodoc.py | 67 -
 xonsh/ply/test/yacc_noerror.py | 66 -
 xonsh/ply/test/yacc_nop.py | 68 -
 xonsh/ply/test/yacc_notfunc.py | 66 -
 xonsh/ply/test/yacc_notok.py | 67 -
 xonsh/ply/test/yacc_prec1.py | 68 -
 xonsh/ply/test/yacc_rr.py | 72 -
 xonsh/ply/test/yacc_rr_unused.py | 30 -
 xonsh/ply/test/yacc_simple.py | 68 -
 xonsh/ply/test/yacc_sr.py | 63 -
 xonsh/ply/test/yacc_term1.py | 68 -
 xonsh/ply/test/yacc_unicode_literals.py | 70 -
 xonsh/ply/test/yacc_unused.py | 77 -
 xonsh/ply/test/yacc_unused_rule.py | 72 -
 xonsh/ply/test/yacc_uprec.py | 63 -
 xonsh/ply/test/yacc_uprec2.py | 63 -
 xonsh/pretty.py | 461 +--
 xonsh/proc.py | 2432 +-----
 xonsh/prompt/__init__.py | 29 -
 xonsh/prompt/base.py | 219 --
 xonsh/prompt/cwd.py | 91 -
 xonsh/prompt/env.py | 53 -
 xonsh/prompt/gitstatus.py | 199 -
 xonsh/prompt/job.py | 15 -
 xonsh/prompt/vc.py | 260 --
 ...mpleter.py => prompt_toolkit_completer.py} | 25 +-
 xonsh/prompt_toolkit_history.py | 101 +
 xonsh/prompt_toolkit_key_bindings.py | 42 +
 xonsh/prompt_toolkit_shell.py | 126 +
 xonsh/ptk/__init__.py | 0
 xonsh/ptk/completer.py | 108 -
 xonsh/ptk/history.py | 80 -
 xonsh/ptk/key_bindings.py | 366 --
 xonsh/ptk/shell.py | 337 --
 xonsh/ptk/shortcuts.py | 131 -
 xonsh/ptk2/__init__.py | 8 -
 xonsh/ptk2/history.py | 53 -
 xonsh/ptk2/key_bindings.py | 352 --
 xonsh/ptk2/shell.py | 350 --
 xonsh/pyghooks.py | 1473 +------
 xonsh/pygments_cache.py | 455 ---
 xonsh/pytest_plugin.py | 68 -
 xonsh/readline_shell.py | 569 +--
 xonsh/replay.py | 126 +-
 xonsh/shell.py | 238 +-
 xonsh/style_tools.py | 446 ---
 xonsh/teepty.py | 331 ++
 xonsh/timings.py | 273 +-
 xonsh/tokenize.py | 1210 ------
 xonsh/tools.py | 2403 ++---------
 xonsh/tracer.py | 240 --
 xonsh/winutils.py | 549 ---
 xonsh/wizard.py | 869 ----
 xonsh/xonfig.py | 759 ----
 xonsh/xonshrc | 9 -
 xonsh/xontribs.json | 312 --
 xonsh/xontribs.py | 178 -
 xonsh/xoreutils/__init__.py | 2 -
 xonsh/xoreutils/_which.py | 367 --
 xonsh/xoreutils/cat.py | 164 -
 xonsh/xoreutils/echo.py | 44 -
 xonsh/xoreutils/pwd.py | 28 -
 xonsh/xoreutils/tee.py | 59 -
 xonsh/xoreutils/tty.py | 45 -
 xonsh/xoreutils/uptime.py | 284 --
 xonsh/xoreutils/util.py | 19 -
 xonsh/xoreutils/which.py | 193 -
 xonsh/xoreutils/yes.py | 25 -
 291 files changed, 7743 insertions(+), 58596 deletions(-)
 delete mode 100644 xonsh/__main__.py
 delete mode 100644 xonsh/ansi_colors.py
 delete mode 100644 xonsh/codecache.py
 delete mode 100644 xonsh/color_tools.py
 delete mode 100644 xonsh/commands_cache.py
 delete mode 100644 xonsh/completers/__init__.py
 delete mode 100644 xonsh/completers/_aliases.py
 delete mode 100644 xonsh/completers/base.py
 delete mode 100644 xonsh/completers/bash.py
 delete mode 100644 xonsh/completers/bash_completion.py
 delete mode 100644 xonsh/completers/commands.py
 delete mode 100644 xonsh/completers/completer.py
 delete mode 100644 xonsh/completers/dirs.py
 delete mode 100644 xonsh/completers/init.py
 delete mode 100644 xonsh/completers/man.py
 delete mode 100644 xonsh/completers/path.py
 delete mode 100644 xonsh/completers/pip.py
 delete mode 100644 xonsh/completers/python.py
 delete mode 100644 xonsh/completers/tools.py
 delete mode 100644 xonsh/completers/xompletions.py
 delete mode 100644 xonsh/contexts.py
 delete mode 100644 xonsh/data/data.txt
 delete mode 100644 xonsh/dumb_shell.py
 delete mode 100644 xonsh/events.py
 delete mode 100644 xonsh/fs.py
 create mode 100644 xonsh/history.py
 delete mode 100644 xonsh/history/__init__.py
 delete mode 100644 xonsh/history/base.py
 delete mode 100644 xonsh/history/dummy.py
 delete mode 100644 xonsh/history/json.py
 delete mode 100644 xonsh/history/main.py
 delete mode 100644 xonsh/history/sqlite.py
 delete mode 100644 xonsh/jsonutils.py
 delete mode 100644 xonsh/jupyter_kernel.py
 delete mode 100644 xonsh/jupyter_shell.py
 delete mode 100644 xonsh/lazyasd.py
 delete mode 100644 xonsh/lazyimps.py
 delete mode 100644 xonsh/macutils.py
 delete mode 100644 xonsh/parsers/__init__.py
 delete mode 100644 xonsh/parsers/base.py
 delete mode 100644 xonsh/parsers/context_check.py
 delete mode 100644 xonsh/parsers/v34.py
 delete mode 100644 xonsh/parsers/v35.py
 delete mode 100644 xonsh/parsers/v36.py
 delete mode 100644 xonsh/platform.py
 delete mode 100644 xonsh/ply/.gitignore
 delete mode 100644 xonsh/ply/.travis.yml
 delete mode 100644 xonsh/ply/CHANGES
 delete mode 100644 xonsh/ply/CONTRIBUTING.md
 delete mode 100644 xonsh/ply/Makefile
 delete mode 100644 xonsh/ply/README.md
 delete mode 100644 xonsh/ply/__init__.py
 delete mode 100644 xonsh/ply/doc/internal.html
 delete mode 100644 xonsh/ply/doc/makedoc.py
 delete mode 100644 xonsh/ply/doc/ply.html
 delete mode 100644 xonsh/ply/example/BASIC/README
 delete mode 100644 xonsh/ply/example/BASIC/basic.py
 delete mode 100644 xonsh/ply/example/BASIC/basiclex.py
 delete mode 100644 xonsh/ply/example/BASIC/basiclog.py
 delete mode 100644 xonsh/ply/example/BASIC/basinterp.py
 delete mode 100644 xonsh/ply/example/BASIC/basparse.py
 delete mode 100644 xonsh/ply/example/BASIC/dim.bas
 delete mode 100644 xonsh/ply/example/BASIC/func.bas
 delete mode 100644 xonsh/ply/example/BASIC/gcd.bas
 delete mode 100644 xonsh/ply/example/BASIC/gosub.bas
 delete mode 100644 xonsh/ply/example/BASIC/hello.bas
 delete mode 100644 xonsh/ply/example/BASIC/linear.bas
 delete mode 100644 xonsh/ply/example/BASIC/maxsin.bas
 delete mode 100644 xonsh/ply/example/BASIC/powers.bas
 delete mode 100644 xonsh/ply/example/BASIC/rand.bas
 delete mode 100644 xonsh/ply/example/BASIC/sales.bas
 delete mode 100644 xonsh/ply/example/BASIC/sears.bas
 delete mode 100644 xonsh/ply/example/BASIC/sqrt1.bas
 delete mode 100644 xonsh/ply/example/BASIC/sqrt2.bas
 delete mode 100644 xonsh/ply/example/GardenSnake/GardenSnake.py
 delete mode 100644 xonsh/ply/example/GardenSnake/README
 delete mode 100644 xonsh/ply/example/README
 delete mode 100644 xonsh/ply/example/ansic/README
 delete mode 100644 xonsh/ply/example/ansic/clex.py
 delete mode 100644 xonsh/ply/example/ansic/cparse.py
 delete mode 100644 xonsh/ply/example/calc/calc.py
 delete mode 100644 xonsh/ply/example/calcdebug/calc.py
 delete mode 100644 xonsh/ply/example/calceof/calc.py
 delete mode 100755 xonsh/ply/example/classcalc/calc.py
 delete mode 100755 xonsh/ply/example/cleanup.sh
 delete mode 100644 xonsh/ply/example/closurecalc/calc.py
 delete mode 100644 xonsh/ply/example/hedit/hedit.py
 delete mode 100755 xonsh/ply/example/newclasscalc/calc.py
 delete mode 100644 xonsh/ply/example/optcalc/README
 delete mode 100644 xonsh/ply/example/optcalc/calc.py
 delete mode 100644 xonsh/ply/example/unicalc/calc.py
 delete mode 100644 xonsh/ply/example/yply/README
 delete mode 100644 xonsh/ply/example/yply/ylex.py
 delete mode 100644 xonsh/ply/example/yply/yparse.py
 delete mode 100755 xonsh/ply/example/yply/yply.py
 delete mode 100644 xonsh/ply/ply/__init__.py
 delete mode 100644 xonsh/ply/ply/cpp.py
 delete mode 100644 xonsh/ply/ply/ctokens.py
 delete mode 100644 xonsh/ply/ply/lex.py
 delete mode 100644 xonsh/ply/ply/yacc.py
 delete mode 100644 xonsh/ply/ply/ygen.py
 delete mode 100644 xonsh/ply/setup.md
 delete mode 100644 xonsh/ply/test/README
 delete mode 100644 xonsh/ply/test/calclex.py
 delete mode 100755 xonsh/ply/test/cleanup.sh
 delete mode 100644 xonsh/ply/test/lex_closure.py
 delete mode 100644 xonsh/ply/test/lex_doc1.py
 delete mode 100644 xonsh/ply/test/lex_dup1.py
 delete mode 100644 xonsh/ply/test/lex_dup2.py
 delete mode 100644 xonsh/ply/test/lex_dup3.py
 delete mode 100644 xonsh/ply/test/lex_empty.py
 delete mode 100644 xonsh/ply/test/lex_error1.py
 delete mode 100644 xonsh/ply/test/lex_error2.py
 delete mode 100644 xonsh/ply/test/lex_error3.py
 delete mode 100644 xonsh/ply/test/lex_error4.py
 delete mode 100644 xonsh/ply/test/lex_hedit.py
 delete mode 100644 xonsh/ply/test/lex_ignore.py
 delete mode 100644 xonsh/ply/test/lex_ignore2.py
 delete mode 100644 xonsh/ply/test/lex_literal1.py
 delete mode 100644 xonsh/ply/test/lex_literal2.py
 delete mode 100644 xonsh/ply/test/lex_literal3.py
 delete mode 100644 xonsh/ply/test/lex_many_tokens.py
 delete mode 100644 xonsh/ply/test/lex_module.py
 delete mode 100644 xonsh/ply/test/lex_module_import.py
 delete mode 100644 xonsh/ply/test/lex_object.py
 delete mode 100644 xonsh/ply/test/lex_opt_alias.py
 delete mode 100644 xonsh/ply/test/lex_optimize.py
 delete mode 100644 xonsh/ply/test/lex_optimize2.py
 delete mode 100644 xonsh/ply/test/lex_optimize3.py
 delete mode 100644 xonsh/ply/test/lex_optimize4.py
 delete mode 100644 xonsh/ply/test/lex_re1.py
 delete mode 100644 xonsh/ply/test/lex_re2.py
 delete mode 100644 xonsh/ply/test/lex_re3.py
 delete mode 100644 xonsh/ply/test/lex_rule1.py
 delete mode 100644 xonsh/ply/test/lex_rule2.py
 delete mode 100644 xonsh/ply/test/lex_rule3.py
 delete mode 100644 xonsh/ply/test/lex_state1.py
 delete mode 100644 xonsh/ply/test/lex_state2.py
 delete mode 100644 xonsh/ply/test/lex_state3.py
 delete mode 100644 xonsh/ply/test/lex_state4.py
 delete mode 100644 xonsh/ply/test/lex_state5.py
 delete mode 100644 xonsh/ply/test/lex_state_noerror.py
 delete mode 100644 xonsh/ply/test/lex_state_norule.py
 delete mode 100644 xonsh/ply/test/lex_state_try.py
 delete mode 100644 xonsh/ply/test/lex_token1.py
 delete mode 100644 xonsh/ply/test/lex_token2.py
 delete mode 100644 xonsh/ply/test/lex_token3.py
 delete mode 100644 xonsh/ply/test/lex_token4.py
 delete mode 100644 xonsh/ply/test/lex_token5.py
 delete mode 100644 xonsh/ply/test/lex_token_dup.py
 delete mode 100644 xonsh/ply/test/pkg_test1/__init__.py
 delete mode 100644 xonsh/ply/test/pkg_test1/parsing/__init__.py
 delete mode 100644 xonsh/ply/test/pkg_test1/parsing/calclex.py
 delete mode 100644 xonsh/ply/test/pkg_test1/parsing/calcparse.py
 delete mode 100644 xonsh/ply/test/pkg_test2/__init__.py
 delete mode 100644 xonsh/ply/test/pkg_test2/parsing/__init__.py
 delete mode 100644 xonsh/ply/test/pkg_test2/parsing/calclex.py
 delete mode 100644 xonsh/ply/test/pkg_test2/parsing/calcparse.py
 delete mode 100644 xonsh/ply/test/pkg_test3/__init__.py
 delete mode 100644 xonsh/ply/test/pkg_test3/generated/__init__.py
 delete mode 100644 xonsh/ply/test/pkg_test3/parsing/__init__.py
 delete mode 100644 xonsh/ply/test/pkg_test3/parsing/calclex.py
 delete mode 100644 xonsh/ply/test/pkg_test3/parsing/calcparse.py
 delete mode 100644 xonsh/ply/test/pkg_test4/__init__.py
 delete mode 100644 xonsh/ply/test/pkg_test4/parsing/__init__.py
 delete mode 100644 xonsh/ply/test/pkg_test4/parsing/calclex.py
 delete mode 100644 xonsh/ply/test/pkg_test4/parsing/calcparse.py
 delete mode 100644 xonsh/ply/test/pkg_test5/__init__.py
 delete mode 100644 xonsh/ply/test/pkg_test5/parsing/__init__.py
 delete mode 100644 xonsh/ply/test/pkg_test5/parsing/calclex.py
 delete mode 100644 xonsh/ply/test/pkg_test5/parsing/calcparse.py
 delete mode 100644 xonsh/ply/test/pkg_test6/__init__.py
 delete mode 100644 xonsh/ply/test/pkg_test6/parsing/__init__.py
 delete mode 100644 xonsh/ply/test/pkg_test6/parsing/calclex.py
 delete mode 100644 xonsh/ply/test/pkg_test6/parsing/calcparse.py
 delete mode 100644 xonsh/ply/test/pkg_test6/parsing/expression.py
 delete mode 100644 xonsh/ply/test/pkg_test6/parsing/statement.py
 delete mode 100644 xonsh/ply/test/test_cpp_nonascii.c
 delete mode 100644 xonsh/ply/test/testcpp.py
 delete mode 100755 xonsh/ply/test/testlex.py
 delete mode 100644 xonsh/ply/test/testyacc.py
 delete mode 100644 xonsh/ply/test/yacc_badargs.py
 delete mode 100644 xonsh/ply/test/yacc_badid.py
 delete mode 100644 xonsh/ply/test/yacc_badprec.py
 delete mode 100644 xonsh/ply/test/yacc_badprec2.py
 delete mode 100644 xonsh/ply/test/yacc_badprec3.py
 delete mode 100644 xonsh/ply/test/yacc_badrule.py
 delete mode 100644 xonsh/ply/test/yacc_badtok.py
 delete mode 100644 xonsh/ply/test/yacc_dup.py
 delete mode 100644 xonsh/ply/test/yacc_error1.py
 delete mode 100644 xonsh/ply/test/yacc_error2.py
 delete mode 100644 xonsh/ply/test/yacc_error3.py
 delete mode 100644 xonsh/ply/test/yacc_error4.py
 delete mode 100644 xonsh/ply/test/yacc_error5.py
 delete mode 100644 xonsh/ply/test/yacc_error6.py
 delete mode 100644 xonsh/ply/test/yacc_error7.py
 delete mode 100644 xonsh/ply/test/yacc_inf.py
 delete mode 100644 xonsh/ply/test/yacc_literal.py
 delete mode 100644 xonsh/ply/test/yacc_misplaced.py
 delete mode 100644 xonsh/ply/test/yacc_missing1.py
 delete mode 100644 xonsh/ply/test/yacc_nested.py
 delete mode 100644 xonsh/ply/test/yacc_nodoc.py
 delete mode 100644 xonsh/ply/test/yacc_noerror.py
 delete mode 100644 xonsh/ply/test/yacc_nop.py
 delete mode 100644 xonsh/ply/test/yacc_notfunc.py
 delete mode 100644 xonsh/ply/test/yacc_notok.py
 delete mode 100644 xonsh/ply/test/yacc_prec1.py
 delete mode 100644 xonsh/ply/test/yacc_rr.py
 delete mode 100644 xonsh/ply/test/yacc_rr_unused.py
 delete mode 100644 xonsh/ply/test/yacc_simple.py
 delete mode 100644 xonsh/ply/test/yacc_sr.py
 delete mode 100644 xonsh/ply/test/yacc_term1.py
 delete mode 100644 xonsh/ply/test/yacc_unicode_literals.py
 delete mode 100644 xonsh/ply/test/yacc_unused.py
 delete mode 100644 xonsh/ply/test/yacc_unused_rule.py
 delete mode 100644 xonsh/ply/test/yacc_uprec.py
 delete mode 100644 xonsh/ply/test/yacc_uprec2.py
 delete mode 100644 xonsh/prompt/__init__.py
 delete mode 100644 xonsh/prompt/base.py
 delete mode 100644 xonsh/prompt/cwd.py
 delete mode 100644 xonsh/prompt/env.py
 delete mode 100644 xonsh/prompt/gitstatus.py
 delete mode 100644 xonsh/prompt/job.py
 delete mode 100644 xonsh/prompt/vc.py
 rename xonsh/{ptk2/completer.py => prompt_toolkit_completer.py} (66%)
 create mode 100644 xonsh/prompt_toolkit_history.py
 create mode 100644 xonsh/prompt_toolkit_key_bindings.py
 create mode 100644 xonsh/prompt_toolkit_shell.py
 delete mode 100644 xonsh/ptk/__init__.py
 delete mode 100644 xonsh/ptk/completer.py
 delete mode 100644 xonsh/ptk/history.py
 delete mode 100644 xonsh/ptk/key_bindings.py
 delete mode 100644 xonsh/ptk/shell.py
 delete mode 100644 xonsh/ptk/shortcuts.py
 delete mode 100644 xonsh/ptk2/__init__.py
 delete mode 100644 xonsh/ptk2/history.py
 delete mode 100644 xonsh/ptk2/key_bindings.py
 delete mode 100644 xonsh/ptk2/shell.py
 delete mode 100644 xonsh/pygments_cache.py
 delete mode 100644 xonsh/pytest_plugin.py
 delete mode 100644 xonsh/style_tools.py
 create mode 100644 xonsh/teepty.py
 delete mode 100644 xonsh/tokenize.py
 delete mode 100644 xonsh/tracer.py
 delete mode 100644 xonsh/winutils.py
 delete mode 100644 xonsh/wizard.py
 delete mode 100644 xonsh/xonfig.py
 delete mode 100644 xonsh/xonshrc
 delete mode 100644 xonsh/xontribs.json
 delete mode 100644 xonsh/xontribs.py
 delete mode 100644 xonsh/xoreutils/__init__.py
 delete mode 100644 xonsh/xoreutils/_which.py
 delete mode 100644 xonsh/xoreutils/cat.py
 delete mode 100644 xonsh/xoreutils/echo.py
 delete mode 100644 xonsh/xoreutils/pwd.py
 delete mode 100644 xonsh/xoreutils/tee.py
 delete mode 100644 xonsh/xoreutils/tty.py
 delete mode 100644 xonsh/xoreutils/uptime.py
 delete mode 100644 xonsh/xoreutils/util.py
 delete mode 100644 xonsh/xoreutils/which.py
 delete mode 100644 xonsh/xoreutils/yes.py

diff --git a/setup.py b/setup.py
index fadbdbe..8ddd4fc 100755
--- a/setup.py
+++ b/setup.py
@@ -61,9 +61,10 @@ def run(self):
 
 
 def main():
-    if sys.version_info < (3, 4):
-        print('gitsome requires at least Python 3.4.')
-        sys.exit(1)
+    python3 = sys.version_info[0] == 3
+    python34_or_35 = python3 and sys.version_info[1] in (4, 5)
+    if not python34_or_35:
+        sys.exit('gitsome currently requires Python 3.4 or 3.5')
     try:
         if '--name' not in sys.argv:
             print(logo)
@@ -87,8 +88,6 @@ def main():
             'Programming Language :: Python :: 3',
             'Programming Language :: Python :: 3.4',
             'Programming Language :: Python :: 3.5',
-            'Programming Language :: Python :: 3.6',
-            'Programming Language :: Python :: 3.7',
             'Topic :: Software Development',
             'Topic :: Software Development :: Libraries :: Python Modules',
         ],
@@ -104,7 +103,7 @@ def main():
     skw['install_requires'] = [
         'numpydoc>=0.5,<1.0',
         'ply>=3.4,<4.0',
-        'prompt_toolkit>=2.0.0,<2.1.0',
+        'prompt-toolkit>=1.0.0,<1.1.0',
         'requests>=2.8.1,<3.0.0',
         'colorama>=0.3.3,<1.0.0',
         'click>=5.1,<7.0',
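The setup.py hunk above replaces an open-ended minimum-version check with an exact allowlist: the reverted code only proceeds on CPython 3.4 or 3.5, so it rejects 3.6+ as well as 2.x, whereas the removed `sys.version_info < (3, 4)` form only enforced a lower bound. A minimal standalone sketch of that gate (the function name check_python_version is illustrative, not from the patch):

    import sys

    def check_python_version(version_info=None):
        # Mirror the reverted setup.py logic: allow only CPython 3.4/3.5.
        vi = sys.version_info if version_info is None else version_info
        python3 = vi[0] == 3
        python34_or_35 = python3 and vi[1] in (4, 5)
        if not python34_or_35:
            # sys.exit() with a string prints it to stderr and exits with status 1.
            sys.exit('gitsome currently requires Python 3.4 or 3.5')

    check_python_version((3, 5, 2))    # passes silently
    # check_python_version((3, 7, 0))  # would exit: requires 3.4 or 3.5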
diff --git a/tests/test_completer.py b/tests/test_completer.py
index ddd7a42..cf19b2f 100644
--- a/tests/test_completer.py
+++ b/tests/test_completer.py
@@ -35,16 +35,16 @@ def create_completer_event(self):
 
     def _get_completions(self, command):
         position = len(command)
-        result = self.completer.get_completions(
+        result = set(self.completer.get_completions(
             Document(text=command, cursor_position=position),
-            self.completer_event)
+            self.completer_event))
         return result
 
     def verify_completions(self, commands, expected):
-        result = []
+        result = set()
         for command in commands:
             # Call the AWS CLI autocompleter
-            result.extend(self._get_completions(command))
+            result.update(self._get_completions(command))
         result_texts = []
         for item in result:
             # Each result item is a Completion object,
@@ -59,13 +59,13 @@ def verify_completions(self, commands, expected):
 
     def test_blank(self):
         text = ''
-        expected = []
+        expected = set([])
         result = self._get_completions(text)
         assert result == expected
 
     def test_no_completions(self):
         text = 'foo'
-        expected = []
+        expected = set([])
         result = self._get_completions(text)
         assert result == expected
 
@@ -116,8 +116,8 @@ def test_build_completions_with_meta(self):
         result = self.completer.build_completions_with_meta('git ad',
                                                             'ad',
                                                             ['add'])
-        assert result[0].display_meta_text == 'Add file contents to the index.'
+        assert result[0].display_meta == 'Add file contents to the index.'
         result = self.completer.build_completions_with_meta('git-alia',
                                                             'git-alia',
                                                             ['git-alias'])
-        assert result[0].display_meta_text == 'Define, search and show aliases.'
+        assert result[0].display_meta == 'Define, search and show aliases.'
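Two things change in the completer tests above: results are collected into a set, which makes the comparisons order-insensitive, and the assertions read Completion.display_meta instead of display_meta_text, matching the downgrade from prompt_toolkit 2.x to 1.x pinned in setup.py (in 2.x, display_meta returns formatted text and display_meta_text carries the plain string). A small sketch of the asserted attribute, assuming prompt_toolkit 1.x is installed:

    from prompt_toolkit.completion import Completion

    # Under prompt_toolkit 1.x, the help string passed to the constructor
    # comes back unchanged from .display_meta.
    c = Completion('add', start_position=0,
                   display_meta='Add file contents to the index.')
    assert c.display_meta == 'Add file contents to the index.'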
diff --git a/tox.ini b/tox.ini
index f77ba35..fc40c1c 100644
--- a/tox.ini
+++ b/tox.ini
@@ -4,7 +4,7 @@
 # and then run "tox" from this directory.
 
 [tox]
-envlist = py34, py35, py36, py37
+envlist = py34, py35
 
 [testenv]
 passenv = TRAVIS TRAVIS_JOB_ID TRAVIS_BRANCH
diff --git a/xonsh/__init__.py b/xonsh/__init__.py
index a7e36dd..9dd16a3 100644
--- a/xonsh/__init__.py
+++ b/xonsh/__init__.py
@@ -1,104 +1 @@
-__version__ = "0.8.10"
-
-
-# amalgamate exclude jupyter_kernel parser_table parser_test_table pyghooks
-# amalgamate exclude winutils wizard pytest_plugin fs macutils pygments_cache
-# amalgamate exclude jupyter_shell
-import os as _os
-
-if _os.getenv("XONSH_DEBUG", ""):
-    pass
-else:
-    import sys as _sys
-
-    try:
-        from xonsh import __amalgam__
-
-        completer = __amalgam__
-        _sys.modules["xonsh.completer"] = __amalgam__
-        contexts = __amalgam__
-        _sys.modules["xonsh.contexts"] = __amalgam__
-        lazyasd = __amalgam__
-        _sys.modules["xonsh.lazyasd"] = __amalgam__
-        lazyjson = __amalgam__
-        _sys.modules["xonsh.lazyjson"] = __amalgam__
-        color_tools = __amalgam__
-        _sys.modules["xonsh.color_tools"] = __amalgam__
-        platform = __amalgam__
-        _sys.modules["xonsh.platform"] = __amalgam__
-        pretty = __amalgam__
-        _sys.modules["xonsh.pretty"] = __amalgam__
-        codecache = __amalgam__
-        _sys.modules["xonsh.codecache"] = __amalgam__
-        lazyimps = __amalgam__
-        _sys.modules["xonsh.lazyimps"] = __amalgam__
-        parser = __amalgam__
-        _sys.modules["xonsh.parser"] = __amalgam__
-        tokenize = __amalgam__
-        _sys.modules["xonsh.tokenize"] = __amalgam__
-        tools = __amalgam__
-        _sys.modules["xonsh.tools"] = __amalgam__
-        ansi_colors = __amalgam__
-        _sys.modules["xonsh.ansi_colors"] = __amalgam__
-        ast = __amalgam__
-        _sys.modules["xonsh.ast"] = __amalgam__
-        commands_cache = __amalgam__
-        _sys.modules["xonsh.commands_cache"] = __amalgam__
-        diff_history = __amalgam__
-        _sys.modules["xonsh.diff_history"] = __amalgam__
-        events = __amalgam__
-        _sys.modules["xonsh.events"] = __amalgam__
-        foreign_shells = __amalgam__
-        _sys.modules["xonsh.foreign_shells"] = __amalgam__
-        jobs = __amalgam__
-        _sys.modules["xonsh.jobs"] = __amalgam__
-        jsonutils = __amalgam__
-        _sys.modules["xonsh.jsonutils"] = __amalgam__
-        lexer = __amalgam__
-        _sys.modules["xonsh.lexer"] = __amalgam__
-        openpy = __amalgam__
-        _sys.modules["xonsh.openpy"] = __amalgam__
-        style_tools = __amalgam__
-        _sys.modules["xonsh.style_tools"] = __amalgam__
-        xontribs = __amalgam__
-        _sys.modules["xonsh.xontribs"] = __amalgam__
-        dirstack = __amalgam__
-        _sys.modules["xonsh.dirstack"] = __amalgam__
-        inspectors = __amalgam__
-        _sys.modules["xonsh.inspectors"] = __amalgam__
-        proc = __amalgam__
-        _sys.modules["xonsh.proc"] = __amalgam__
-        shell = __amalgam__
-        _sys.modules["xonsh.shell"] = __amalgam__
-        timings = __amalgam__
-        _sys.modules["xonsh.timings"] = __amalgam__
-        xonfig = __amalgam__
-        _sys.modules["xonsh.xonfig"] = __amalgam__
-        base_shell = __amalgam__
-        _sys.modules["xonsh.base_shell"] = __amalgam__
-        environ = __amalgam__
-        _sys.modules["xonsh.environ"] = __amalgam__
-        tracer = __amalgam__
-        _sys.modules["xonsh.tracer"] = __amalgam__
-        readline_shell = __amalgam__
-        _sys.modules["xonsh.readline_shell"] = __amalgam__
-        replay = __amalgam__
-        _sys.modules["xonsh.replay"] = __amalgam__
-        aliases = __amalgam__
-        _sys.modules["xonsh.aliases"] = __amalgam__
-        dumb_shell = __amalgam__
-        _sys.modules["xonsh.dumb_shell"] = __amalgam__
-        built_ins = __amalgam__
-        _sys.modules["xonsh.built_ins"] = __amalgam__
-        execer = __amalgam__
-        _sys.modules["xonsh.execer"] = __amalgam__
-        imphooks = __amalgam__
-        _sys.modules["xonsh.imphooks"] = __amalgam__
-        main = __amalgam__
-        _sys.modules["xonsh.main"] = __amalgam__
-        del __amalgam__
-    except ImportError:
-        pass
-    del _sys
-del _os
-# amalgamate end
+__version__ = '0.2.2'
\ No newline at end of file
diff --git a/xonsh/__main__.py b/xonsh/__main__.py
deleted file mode 100644
index ae6e649..0000000
--- a/xonsh/__main__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from xonsh.main import main
-
-main()
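The deleted xonsh/__init__.py is xonsh's amalgamation speedup: one combined __amalgam__ module is imported, then registered in sys.modules under every submodule's dotted name, so later imports resolve from the module cache instead of hitting the filesystem once per file. A minimal sketch of that registration trick, with illustrative names (mypkg, sum_fast) that are not from the patch:

    import importlib
    import sys
    import types

    # One "blob" module standing in for many submodules.
    blob = types.ModuleType('__amalgam__')
    blob.sum_fast = lambda xs: sum(xs)

    # Register the parent package and alias a submodule name to the blob.
    sys.modules['mypkg'] = types.ModuleType('mypkg')
    sys.modules['mypkg.tools'] = blob

    # The import machinery checks sys.modules first, so this returns the blob.
    tools = importlib.import_module('mypkg.tools')
    assert tools.sum_fast([1, 2]) == 3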
diff --git a/xonsh/aliases.py b/xonsh/aliases.py
index 3c5d9ed..5db137b 100644
--- a/xonsh/aliases.py
+++ b/xonsh/aliases.py
@@ -1,827 +1,160 @@
-# -*- coding: utf-8 -*-
 """Aliases for the xonsh shell."""
 import os
-import re
-import sys
-import inspect
-import argparse
+import shlex
 import builtins
-import collections.abc as cabc
+import subprocess
+from warnings import warn
+from argparse import ArgumentParser
 
-from xonsh.lazyasd import lazyobject
-from xonsh.dirstack import cd, pushd, popd, dirs, _get_cwd
-from xonsh.environ import locate_binary, make_args_env
-from xonsh.foreign_shells import foreign_shell_data
-from xonsh.jobs import jobs, fg, bg, clean_jobs
-from xonsh.platform import (
-    ON_ANACONDA,
-    ON_DARWIN,
-    ON_WINDOWS,
-    ON_FREEBSD,
-    ON_NETBSD,
-    ON_DRAGONFLY,
-)
-from xonsh.tools import (
-    XonshError,
-    argvquote,
-    escape_windows_cmd_string,
-    to_bool,
-    swap_values,
-    strip_simple_quotes,
-    ALIAS_KWARG_NAMES,
-    unthreadable,
-    print_color,
-)
-from xonsh.replay import replay_main
+from xonsh.dirstack import cd, pushd, popd, dirs
+from xonsh.jobs import jobs, fg, bg, kill_all_jobs
 from xonsh.timings import timeit_alias
-from xonsh.xontribs import xontribs_main
-from xonsh.ast import isexpression
+from xonsh.tools import ON_MAC, ON_WINDOWS, XonshError
+from xonsh.history import main as history_alias
+from xonsh.replay import main as replay_main
+from xonsh.environ import locate_binary
 
-import xonsh.completers._aliases as xca
-import xonsh.history.main as xhm
-import xonsh.xoreutils.which as xxw
-
-
-@lazyobject
-def SUB_EXEC_ALIAS_RE():
-    return re.compile(r"@\(|\$\(|!\(|\$\[|!\[")
-
-
-class Aliases(cabc.MutableMapping):
-    """Represents a location to hold and look up aliases."""
-
-    def __init__(self, *args, **kwargs):
-        self._raw = {}
-        self.update(*args, **kwargs)
-
-    def get(self, key, default=None):
-        """Returns the (possibly modified) value. If the key is not present,
-        then `default` is returned.
-        If the value is callable, it is returned without modification. If it
-        is an iterable of strings it will be evaluated recursively to expand
-        other aliases, resulting in a new list or a "partially applied"
-        callable.
-        """
-        val = self._raw.get(key)
-        if val is None:
-            return default
-        elif isinstance(val, cabc.Iterable) or callable(val):
-            return self.eval_alias(val, seen_tokens={key})
-        else:
-            msg = "alias of {!r} has an inappropriate type: {!r}"
-            raise TypeError(msg.format(key, val))
-
-    def eval_alias(self, value, seen_tokens=frozenset(), acc_args=()):
-        """
-        "Evaluates" the alias ``value``, by recursively looking up the leftmost
-        token and "expanding" if it's also an alias.
-
-        A value like ``["cmd", "arg"]`` might transform like this:
-        ``> ["cmd", "arg"] -> ["ls", "-al", "arg"] -> callable()``
-        where ``cmd=ls -al`` and ``ls`` is an alias with its value being a
-        callable.  The resulting callable will be "partially applied" with
-        ``["-al", "arg"]``.
-        """
-        # Beware of mutability: default values for keyword args are evaluated
-        # only once.
-        if callable(value):
-            return partial_eval_alias(value, acc_args=acc_args)
-        else:
-            expand_path = builtins.__xonsh__.expand_path
-            token, *rest = map(expand_path, value)
-            if token in seen_tokens or token not in self._raw:
-                # ^ Making sure things like `egrep=egrep --color=auto` works,
-                # and that `l` evals to `ls --color=auto -CF` if `l=ls -CF`
-                # and `ls=ls --color=auto`
-                rtn = [token]
-                rtn.extend(rest)
-                rtn.extend(acc_args)
-                return rtn
-            else:
-                seen_tokens = seen_tokens | {token}
-                acc_args = rest + list(acc_args)
-                return self.eval_alias(self._raw[token], seen_tokens, acc_args)
-
-    def expand_alias(self, line):
-        """Expands any aliases present in line if alias does not point to a
-        builtin function and if alias is only a single command.
-        """
-        word = line.split(" ", 1)[0]
-        if word in builtins.aliases and isinstance(self.get(word), cabc.Sequence):
-            word_idx = line.find(word)
-            expansion = " ".join(self.get(word))
-            line = line[:word_idx] + expansion + line[word_idx + len(word) :]
-        return line
-
-    #
-    # Mutable mapping interface
-    #
-
-    def __getitem__(self, key):
-        return self._raw[key]
-
-    def __setitem__(self, key, val):
-        if isinstance(val, str):
-            f = "<exec-alias:" + key + ">"
-            if SUB_EXEC_ALIAS_RE.search(val) is not None:
-                # We have a sub-command, e.g. $(cmd), to evaluate
-                self._raw[key] = ExecAlias(val, filename=f)
-            elif isexpression(val):
-                # expansion substitution
-                lexer = builtins.__xonsh__.execer.parser.lexer
-                self._raw[key] = list(map(strip_simple_quotes, lexer.split(val)))
-            else:
-                # need to exec alias
-                self._raw[key] = ExecAlias(val, filename=f)
-        else:
-            self._raw[key] = val
-
-    def __delitem__(self, key):
-        del self._raw[key]
-
-    def update(self, *args, **kwargs):
-        for key, val in dict(*args, **kwargs).items():
-            self[key] = val
-
-    def __iter__(self):
-        yield from self._raw
-
-    def __len__(self):
-        return len(self._raw)
-
-    def __str__(self):
-        return str(self._raw)
-
-    def __repr__(self):
-        return "{0}.{1}({2})".format(
-            self.__class__.__module__, self.__class__.__name__, self._raw
-        )
-
-    def _repr_pretty_(self, p, cycle):
-        name = "{0}.{1}".format(self.__class__.__module__, self.__class__.__name__)
-        with p.group(0, name + "(", ")"):
-            if cycle:
-                p.text("...")
-            elif len(self):
-                p.break_()
-                p.pretty(dict(self))
-
-
-class ExecAlias:
-    """Provides a callable alias for xonsh source code."""
-
-    def __init__(self, src, filename="<exec-alias>"):
-        """
-        Parameters
-        ----------
-        src : str
-            Source code that will be
-        """
-        self.src = src if src.endswith("\n") else src + "\n"
-        self.filename = filename
-
-    def __call__(
-        self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None
-    ):
-        execer = builtins.__xonsh__.execer
-        frame = stack[0][0]  # execute as though we are at the call site
-        execer.exec(
-            self.src, glbs=frame.f_globals, locs=frame.f_locals, filename=self.filename
-        )
-
-    def __repr__(self):
-        return "ExecAlias({0!r}, filename={1!r})".format(self.src, self.filename)
-
-
-class PartialEvalAliasBase:
-    """Partially evaluated alias."""
-
-    def __init__(self, f, acc_args=()):
-        """
-        Parameters
-        ----------
-        f : callable
-            A function to dispatch to.
-        acc_args : sequence of strings, optional
-            Additional arguments to prepent to the argument list passed in
-            when the alias is called.
-        """
-        self.f = f
-        self.acc_args = acc_args
-        self.__name__ = getattr(f, "__name__", self.__class__.__name__)
-
-    def __call__(
-        self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None
-    ):
-        args = list(self.acc_args) + args
-        return self.f(args, stdin, stdout, stderr, spec, stack)
-
-    def __repr__(self):
-        return "{name}({f!r}, acc_args={acc_args!r})".format(
-            name=self.__class__.__name__, f=self.f, acc_args=self.acc_args
-        )
-
-
-class PartialEvalAlias0(PartialEvalAliasBase):
-    def __call__(
-        self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None
-    ):
-        args = list(self.acc_args) + args
-        if args:
-            msg = "callable alias {f!r} takes no arguments, but {args!f} provided. "
-            msg += "Of these {acc_args!r} were partially applied."
-            raise XonshError(msg.format(f=self.f, args=args, acc_args=self.acc_args))
-        return self.f()
-
-
-class PartialEvalAlias1(PartialEvalAliasBase):
-    def __call__(
-        self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None
-    ):
-        args = list(self.acc_args) + args
-        return self.f(args)
-
-
-class PartialEvalAlias2(PartialEvalAliasBase):
-    def __call__(
-        self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None
-    ):
-        args = list(self.acc_args) + args
-        return self.f(args, stdin)
-
-
-class PartialEvalAlias3(PartialEvalAliasBase):
-    def __call__(
-        self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None
-    ):
-        args = list(self.acc_args) + args
-        return self.f(args, stdin, stdout)
-
-
-class PartialEvalAlias4(PartialEvalAliasBase):
-    def __call__(
-        self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None
-    ):
-        args = list(self.acc_args) + args
-        return self.f(args, stdin, stdout, stderr)
-
-
-class PartialEvalAlias5(PartialEvalAliasBase):
-    def __call__(
-        self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None
-    ):
-        args = list(self.acc_args) + args
-        return self.f(args, stdin, stdout, stderr, spec)
-
-
-class PartialEvalAlias6(PartialEvalAliasBase):
-    def __call__(
-        self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None
-    ):
-        args = list(self.acc_args) + args
-        return self.f(args, stdin, stdout, stderr, spec, stack)
-
-
-PARTIAL_EVAL_ALIASES = (
-    PartialEvalAlias0,
-    PartialEvalAlias1,
-    PartialEvalAlias2,
-    PartialEvalAlias3,
-    PartialEvalAlias4,
-    PartialEvalAlias5,
-    PartialEvalAlias6,
-)
-
-
-def partial_eval_alias(f, acc_args=()):
-    """Dispatches the appropriate eval alias based on the number of args to the original callable alias
-    and how many arguments to apply.
-    """
-    # no partial needed if no extra args
-    if not acc_args:
-        return f
-    # need to dispatch
-    numargs = 0
-    for name, param in inspect.signature(f).parameters.items():
-        if (
-            param.kind == param.POSITIONAL_ONLY
-            or param.kind == param.POSITIONAL_OR_KEYWORD
-        ):
-            numargs += 1
-        elif name in ALIAS_KWARG_NAMES and param.kind == param.KEYWORD_ONLY:
-            numargs += 1
-    if numargs < 7:
-        return PARTIAL_EVAL_ALIASES[numargs](f, acc_args=acc_args)
-    else:
-        e = "Expected proxy with 6 or fewer arguments for {}, not {}"
-        raise XonshError(e.format(", ".join(ALIAS_KWARG_NAMES), numargs))
-
-
-#
-# Actual aliases below
-#
-
-
-def xonsh_exit(args, stdin=None):
+def exit(args, stdin=None):  # pylint:disable=redefined-builtin,W0622
     """Sends signal to exit shell."""
-    if not clean_jobs():
-        # Do not exit if jobs not cleaned up
-        return None, None
-    builtins.__xonsh__.exit = True
+    builtins.__xonsh_exit__ = True
+    kill_all_jobs()
     print()  # gimme a newline
     return None, None
 
 
-def xonsh_reset(args, stdin=None):
-    """ Clears __xonsh__.ctx"""
-    builtins.__xonsh__.ctx.clear()
-
-
-@lazyobject
-def _SOURCE_FOREIGN_PARSER():
-    desc = "Sources a file written in a foreign shell language."
-    parser = argparse.ArgumentParser("source-foreign", description=desc)
-    parser.add_argument("shell", help="Name or path to the foreign shell")
-    parser.add_argument(
-        "files_or_code",
-        nargs="+",
-        help="file paths to source or code in the target " "language.",
-    )
-    parser.add_argument(
-        "-i",
-        "--interactive",
-        type=to_bool,
-        default=True,
-        help="whether the sourced shell should be interactive",
-        dest="interactive",
-    )
-    parser.add_argument(
-        "-l",
-        "--login",
-        type=to_bool,
-        default=False,
-        help="whether the sourced shell should be login",
-        dest="login",
-    )
-    parser.add_argument(
-        "--envcmd", default=None, dest="envcmd", help="command to print environment"
-    )
-    parser.add_argument(
-        "--aliascmd", default=None, dest="aliascmd", help="command to print aliases"
-    )
-    parser.add_argument(
-        "--extra-args",
-        default=(),
-        dest="extra_args",
-        type=(lambda s: tuple(s.split())),
-        help="extra arguments needed to run the shell",
-    )
-    parser.add_argument(
-        "-s",
-        "--safe",
-        type=to_bool,
-        default=True,
-        help="whether the source shell should be run safely, "
-        "and not raise any errors, even if they occur.",
-        dest="safe",
-    )
-    parser.add_argument(
-        "-p",
-        "--prevcmd",
-        default=None,
-        dest="prevcmd",
-        help="command(s) to run before any other commands, "
-        "replaces traditional source.",
-    )
-    parser.add_argument(
-        "--postcmd",
-        default="",
-        dest="postcmd",
-        help="command(s) to run after all other commands",
-    )
-    parser.add_argument(
-        "--funcscmd",
-        default=None,
-        dest="funcscmd",
-        help="code to find locations of all native functions " "in the shell language.",
-    )
-    parser.add_argument(
-        "--sourcer",
-        default=None,
-        dest="sourcer",
-        help="the source command in the target shell " "language, default: source.",
-    )
-    parser.add_argument(
-        "--use-tmpfile",
-        type=to_bool,
-        default=False,
-        help="whether the commands for source shell should be "
-        "written to a temporary file.",
-        dest="use_tmpfile",
-    )
-    parser.add_argument(
-        "--seterrprevcmd",
-        default=None,
-        dest="seterrprevcmd",
-        help="command(s) to set exit-on-error before any" "other commands.",
-    )
-    parser.add_argument(
-        "--seterrpostcmd",
-        default=None,
-        dest="seterrpostcmd",
-        help="command(s) to set exit-on-error after all" "other commands.",
-    )
-    parser.add_argument(
-        "--overwrite-aliases",
-        default=False,
-        action="store_true",
-        dest="overwrite_aliases",
-        help="flag for whether or not sourced aliases should "
-        "replace the current xonsh aliases.",
-    )
-    parser.add_argument(
-        "--suppress-skip-message",
-        default=None,
-        action="store_true",
-        dest="suppress_skip_message",
-        help="flag for whether or not skip messages should be suppressed.",
-    )
-    parser.add_argument(
-        "--show",
-        default=False,
-        action="store_true",
-        dest="show",
-        help="Will show the script output.",
-    )
-    parser.add_argument(
-        "-d",
-        "--dry-run",
-        default=False,
-        action="store_true",
-        dest="dryrun",
-        help="Will not actually source the file.",
-    )
-    return parser
-
-
-def source_foreign(args, stdin=None, stdout=None, stderr=None):
-    """Sources a file written in a foreign shell language."""
-    env = builtins.__xonsh__.env
-    ns = _SOURCE_FOREIGN_PARSER.parse_args(args)
-    ns.suppress_skip_message = (
-        env.get("FOREIGN_ALIASES_SUPPRESS_SKIP_MESSAGE")
-        if ns.suppress_skip_message is None
-        else ns.suppress_skip_message
-    )
-    if ns.prevcmd is not None:
-        pass  # don't change prevcmd if given explicitly
-    elif os.path.isfile(ns.files_or_code[0]):
-        # we have filename to source
-        ns.prevcmd = '{} "{}"'.format(ns.sourcer, '" "'.join(ns.files_or_code))
"{}"'.format(ns.sourcer, '" "'.join(ns.files_or_code)) - elif ns.prevcmd is None: - ns.prevcmd = " ".join(ns.files_or_code) # code to run, no files - foreign_shell_data.cache_clear() # make sure that we don't get prev src - fsenv, fsaliases = foreign_shell_data( - shell=ns.shell, - login=ns.login, - interactive=ns.interactive, - envcmd=ns.envcmd, - aliascmd=ns.aliascmd, - extra_args=ns.extra_args, - safe=ns.safe, - prevcmd=ns.prevcmd, - postcmd=ns.postcmd, - funcscmd=ns.funcscmd, - sourcer=ns.sourcer, - use_tmpfile=ns.use_tmpfile, - seterrprevcmd=ns.seterrprevcmd, - seterrpostcmd=ns.seterrpostcmd, - show=ns.show, - dryrun=ns.dryrun, - ) - if fsenv is None: - if ns.dryrun: - return - else: - msg = "xonsh: error: Source failed: {0!r}\n".format(ns.prevcmd) - msg += "xonsh: error: Possible reasons: File not found or syntax error\n" - return (None, msg, 1) - # apply results +def source_bash(args, stdin=None): + """Implements bash's source builtin.""" + import tempfile + env = builtins.__xonsh_env__ denv = env.detype() - for k, v in fsenv.items(): - if k in denv and v == denv[k]: + with tempfile.NamedTemporaryFile(mode='w+t') as f: + args = ' '.join(args) + inp = 'source {0}\nenv >> {1}\n'.format(args, f.name) + try: + subprocess.check_output(['bash'], + input=inp, + env=denv, + stderr=subprocess.PIPE, + universal_newlines=True) + except subprocess.CalledProcessError: + return None, 'could not source {0}\n'.format(args) + f.seek(0) + exported = f.read() + items = [l.split('=', 1) for l in exported.splitlines() if '=' in l] + newenv = dict(items) + for k, v in newenv.items(): + if k in env and v == denv[k]: continue # no change from original env[k] = v - # Remove any env-vars that were unset by the script. - for k in denv: - if k not in fsenv: - env.pop(k, None) - # Update aliases - baliases = builtins.aliases - for k, v in fsaliases.items(): - if k in baliases and v == baliases[k]: - continue # no change from original - elif ns.overwrite_aliases or k not in baliases: - baliases[k] = v - elif ns.suppress_skip_message: - pass - else: - msg = ( - "Skipping application of {0!r} alias from {1!r} " - "since it shares a name with an existing xonsh alias. " - 'Use "--overwrite-alias" option to apply it anyway.' - 'You may prevent this message with "--suppress-skip-message" or ' - '"$FOREIGN_ALIASES_SUPPRESS_SKIP_MESSAGE = True".' - ) - print(msg.format(k, ns.shell), file=stderr) + return def source_alias(args, stdin=None): """Executes the contents of the provided files in the current context. - If sourced file isn't found in cwd, search for file along $PATH to source - instead. - """ - env = builtins.__xonsh__.env - encoding = env.get("XONSH_ENCODING") - errors = env.get("XONSH_ENCODING_ERRORS") - for i, fname in enumerate(args): - fpath = fname - if not os.path.isfile(fpath): - fpath = locate_binary(fname) - if fpath is None: - if env.get("XONSH_DEBUG"): - print("source: {}: No such file".format(fname), file=sys.stderr) - if i == 0: - raise RuntimeError( - "must source at least one file, " + fname + "does not exist." - ) - break - _, fext = os.path.splitext(fpath) - if fext and fext != ".xsh" and fext != ".py": - raise RuntimeError( - "attempting to source non-xonsh file! If you are " - "trying to source a file in another language, " - "then please use the appropriate source command. 
" - "For example, source-bash script.sh" - ) - with open(fpath, "r", encoding=encoding, errors=errors) as fp: - src = fp.read() - if not src.endswith("\n"): - src += "\n" - ctx = builtins.__xonsh__.ctx - updates = {"__file__": fpath, "__name__": os.path.abspath(fpath)} - with env.swap(**make_args_env(args[i + 1 :])), swap_values(ctx, updates): - try: - builtins.execx(src, "exec", ctx, filename=fpath) - except Exception: - print_color( - "{RED}You may be attempting to source non-xonsh file! " - "{NO_COLOR}If you are trying to source a file in " - "another language, then please use the appropriate " - "source command. For example, {GREEN}source-bash " - "script.sh{NO_COLOR}", - file=sys.stderr, - ) - raise - - -def source_cmd(args, stdin=None): - """Simple cmd.exe-specific wrapper around source-foreign.""" - args = list(args) - fpath = locate_binary(args[0]) - args[0] = fpath if fpath else args[0] - if not os.path.isfile(args[0]): - return (None, "xonsh: error: File not found: {}\n".format(args[0]), 1) - prevcmd = "call " - prevcmd += " ".join([argvquote(arg, force=True) for arg in args]) - prevcmd = escape_windows_cmd_string(prevcmd) - args.append("--prevcmd={}".format(prevcmd)) - args.insert(0, "cmd") - args.append("--interactive=0") - args.append("--sourcer=call") - args.append("--envcmd=set") - args.append("--seterrpostcmd=if errorlevel 1 exit 1") - args.append("--use-tmpfile=1") - with builtins.__xonsh__.env.swap(PROMPT="$P$G"): - return source_foreign(args, stdin=stdin) + If sourced file isn't found in cwd, search for file along $PATH to source instead""" + for fname in args: + if not os.path.isfile(fname): + fname = locate_binary(fname, cwd=None)[:-1] + with open(fname, 'r') as fp: + execx(fp.read(), 'exec', builtins.__xonsh_ctx__) def xexec(args, stdin=None): - """exec [-h|--help] command [args...] - - exec (also aliased as xexec) uses the os.execvpe() function to - replace the xonsh process with the specified program. This provides - the functionality of the bash 'exec' builtin:: - - >>> exec bash -l -i - bash $ - - The '-h' and '--help' options print this message and exit. - - Notes - ----- - This command **is not** the same as the Python builtin function - exec(). That function is for running Python code. This command, - which shares the same name as the sh-lang statement, is for launching - a command directly in the same process. In the event of a name conflict, - please use the xexec command directly or dive into subprocess mode - explicitly with ![exec command]. For more details, please see - http://xon.sh/faq.html#exec. + """Replaces current process with command specified and passes in the + current xonsh environment. 
""" - if len(args) == 0: - return (None, "xonsh: exec: no args specified\n", 1) - elif args[0] == "-h" or args[0] == "--help": - return inspect.getdoc(xexec) - else: - denv = builtins.__xonsh__.env.detype() + env = builtins.__xonsh_env__ + denv = env.detype() + if len(args) > 0: try: os.execvpe(args[0], args, denv) except FileNotFoundError as e: - return ( - None, - "xonsh: exec: file not found: {}: {}" "\n".format(e.args[1], args[0]), - 1, - ) - - -class AWitchAWitch(argparse.Action): - SUPPRESS = "==SUPPRESS==" - - def __init__( - self, option_strings, version=None, dest=SUPPRESS, default=SUPPRESS, **kwargs - ): - super().__init__( - option_strings=option_strings, dest=dest, default=default, nargs=0, **kwargs - ) - - def __call__(self, parser, namespace, values, option_string=None): - import webbrowser - - webbrowser.open("https://github.com/xonsh/xonsh/commit/f49b400") - parser.exit() - - -def xonfig(args, stdin=None): - """Runs the xonsh configuration utility.""" - from xonsh.xonfig import xonfig_main # lazy import - - return xonfig_main(args) - - -@unthreadable -def trace(args, stdin=None, stdout=None, stderr=None, spec=None): - """Runs the xonsh tracer utility.""" - from xonsh.tracer import tracermain # lazy import - - try: - return tracermain(args, stdin=stdin, stdout=stdout, stderr=stderr, spec=spec) - except SystemExit: - pass - - -def showcmd(args, stdin=None): - """usage: showcmd [-h|--help|cmd args] - - Displays the command and arguments as a list of strings that xonsh would - run in subprocess mode. This is useful for determining how xonsh evaluates - your commands and arguments prior to running these commands. - - optional arguments: - -h, --help show this help message and exit - - example: - >>> showcmd echo $USER can't hear "the sea" - ['echo', 'I', "can't", 'hear', 'the sea'] - """ - if len(args) == 0 or (len(args) == 1 and args[0] in {"-h", "--help"}): - print(showcmd.__doc__.rstrip().replace("\n ", "\n")) + return 'xonsh: ' + e.args[1] + ': ' + args[0] + '\n' else: - sys.displayhook(args) + return 'xonsh: exec: no args specified\n' -def detect_xpip_alias(): - """ - Determines the correct invocation to get xonsh's pip - """ - if not getattr(sys, "executable", None): - return lambda args, stdin=None: ( - "", - "Sorry, unable to run pip on your system (missing sys.executable)", - 1, - ) - - basecmd = [sys.executable, "-m", "pip"] - try: - if ON_WINDOWS: - # XXX: Does windows have an installation mode that requires UAC? 
- return basecmd - elif not os.access(os.path.dirname(sys.executable), os.W_OK): - return ["sudo"] + basecmd - else: - return basecmd - except Exception: - # Something freaky happened, return something that'll probably work - return basecmd +_BANG_N_PARSER = None -def make_default_aliases(): - """Creates a new default aliases dictionary.""" - default_aliases = { - "cd": cd, - "pushd": pushd, - "popd": popd, - "dirs": dirs, - "jobs": jobs, - "fg": fg, - "bg": bg, - "EOF": xonsh_exit, - "exit": xonsh_exit, - "quit": xonsh_exit, - "exec": xexec, - "xexec": xexec, - "source": source_alias, - "source-zsh": ["source-foreign", "zsh", "--sourcer=source"], - "source-bash": ["source-foreign", "bash", "--sourcer=source"], - "source-cmd": source_cmd, - "source-foreign": source_foreign, - "history": xhm.history_main, - "replay": replay_main, - "trace": trace, - "timeit": timeit_alias, - "xonfig": xonfig, - "scp-resume": ["rsync", "--partial", "-h", "--progress", "--rsh=ssh"], - "showcmd": showcmd, - "ipynb": ["jupyter", "notebook", "--no-browser"], - "which": xxw.which, - "xontrib": xontribs_main, - "completer": xca.completer_alias, - "xpip": detect_xpip_alias(), - "xonsh-reset": xonsh_reset, +def bang_n(args, stdin=None): + """Re-runs the nth command as specified in the argument.""" + global _BANG_N_PARSER + if _BANG_N_PARSER is None: + parser = _BANG_N_PARSER = ArgumentParser('!n', usage='!n ', + description="Re-runs the nth command as specified in the argument.") + parser.add_argument('n', type=int, help='the command to rerun, may be negative') + else: + parser = _BANG_N_PARSER + ns = parser.parse_args(args) + hist = builtins.__xonsh_history__ + nhist = len(hist) + n = nhist + ns.n if ns.n < 0 else ns.n + if n < 0 or n >= nhist: + raise IndexError('n out of range, {0} for history len {1}'.format(ns.n, nhist)) + cmd = hist.inps[n] + if cmd.startswith('!'): + raise XonshError('xonsh: error: recursive call to !n') + builtins.execx(cmd) + + +def bang_bang(args, stdin=None): + """Re-runs the last command. Just a wrapper around bang_n.""" + return bang_n(['-1']) + + +DEFAULT_ALIASES = { + 'cd': cd, + 'pushd': pushd, + 'popd': popd, + 'dirs': dirs, + 'jobs': jobs, + 'fg': fg, + 'bg': bg, + 'EOF': exit, + 'exit': exit, + 'quit': exit, + 'xexec': xexec, + 'source': source_alias, + 'source-bash': source_bash, + 'history': history_alias, + 'replay': replay_main, + '!!': bang_bang, + '!n': bang_n, + 'timeit': timeit_alias, + 'scp-resume': ['rsync', '--partial', '-h', '--progress', '--rsh=ssh'], + 'ipynb': ['ipython', 'notebook', '--no-browser'], +} + +if ON_WINDOWS: + # Borrow builtin commands from cmd.exe. + WINDOWS_CMD_ALIASES = { + 'cls', + 'copy', + 'del', + 'dir', + 'erase', + 'md', + 'mkdir', + 'mklink', + 'move', + 'rd', + 'ren', + 'rename', + 'rmdir', + 'time', + 'type', + 'vol' } - if ON_WINDOWS: - # Borrow builtin commands from cmd.exe. - windows_cmd_aliases = { - "cls", - "copy", - "del", - "dir", - "echo", - "erase", - "md", - "mkdir", - "mklink", - "move", - "rd", - "ren", - "rename", - "rmdir", - "time", - "type", - "vol", - } - for alias in windows_cmd_aliases: - default_aliases[alias] = ["cmd", "/c", alias] - default_aliases["call"] = ["source-cmd"] - default_aliases["source-bat"] = ["source-cmd"] - default_aliases["clear"] = "cls" - if ON_ANACONDA: - # Add aliases specific to the Anaconda python distribution. 
- default_aliases["activate"] = ["source-cmd", "activate.bat"] - default_aliases["deactivate"] = ["source-cmd", "deactivate.bat"] - if not locate_binary("sudo"): - import xonsh.winutils as winutils - def sudo(args): - if len(args) < 1: - print( - "You need to provide an executable to run as " "Administrator." - ) - return - cmd = args[0] - if locate_binary(cmd): - return winutils.sudo(cmd, args[1:]) - elif cmd.lower() in windows_cmd_aliases: - args = ["/D", "/C", "CD", _get_cwd(), "&&"] + args - return winutils.sudo("cmd", args) - else: - msg = 'Cannot find the path for executable "{0}".' - print(msg.format(cmd)) + for alias in WINDOWS_CMD_ALIASES: + DEFAULT_ALIASES[alias] = ['cmd', '/c', alias] - default_aliases["sudo"] = sudo - elif ON_DARWIN: - default_aliases["ls"] = ["ls", "-G"] - elif ON_FREEBSD or ON_DRAGONFLY: - default_aliases["grep"] = ["grep", "--color=auto"] - default_aliases["egrep"] = ["egrep", "--color=auto"] - default_aliases["fgrep"] = ["fgrep", "--color=auto"] - default_aliases["ls"] = ["ls", "-G"] - elif ON_NETBSD: - default_aliases["grep"] = ["grep", "--color=auto"] - default_aliases["egrep"] = ["egrep", "--color=auto"] - default_aliases["fgrep"] = ["fgrep", "--color=auto"] - else: - default_aliases["grep"] = ["grep", "--color=auto"] - default_aliases["egrep"] = ["egrep", "--color=auto"] - default_aliases["fgrep"] = ["fgrep", "--color=auto"] - default_aliases["ls"] = ["ls", "--color=auto", "-v"] - return default_aliases + DEFAULT_ALIASES['which'] = ['where'] + +elif ON_MAC: + DEFAULT_ALIASES['ls'] = ['ls', '-G'] +else: + DEFAULT_ALIASES['grep'] = ['grep', '--color=auto'] + DEFAULT_ALIASES['ls'] = ['ls', '--color=auto', '-v'] diff --git a/xonsh/ansi_colors.py b/xonsh/ansi_colors.py deleted file mode 100644 index 3711ccd..0000000 --- a/xonsh/ansi_colors.py +++ /dev/null @@ -1,1095 +0,0 @@ -"""Tools for helping with ANSI color codes.""" -import re -import sys -import warnings -import builtins - -from xonsh.platform import HAS_PYGMENTS -from xonsh.lazyasd import LazyDict, lazyobject -from xonsh.color_tools import ( - RE_BACKGROUND, - BASE_XONSH_COLORS, - make_palette, - find_closest_color, - rgb2short, - rgb_to_256, - short_to_ints, -) -from xonsh.tools import FORMATTER - - -def ansi_partial_color_format(template, style="default", cmap=None, hide=False): - """Formats a template string but only with respect to the colors. - Another template string is returned, with the color values filled in. - - Parameters - ---------- - template : str - The template string, potentially with color names. - style : str, optional - Style name to look up color map from. - cmap : dict, optional - A color map to use, this will prevent the color map from being - looked up via the style name. - hide : bool, optional - Whether to wrap the color codes in the \\001 and \\002 escape - codes, so that the color codes are not counted against line - length. - - Returns - ------- - A template string with the color values filled in. - """ - try: - return _ansi_partial_color_format_main( - template, style=style, cmap=cmap, hide=hide - ) - except Exception: - return template - - -def _ansi_partial_color_format_main(template, style="default", cmap=None, hide=False): - if cmap is not None: - pass - elif style in ANSI_STYLES: - cmap = ANSI_STYLES[style] - else: - try: # dynamically loading the style - cmap = ansi_style_by_name(style) - except Exception: - msg = "Could not find color style {0!r}, using default." 
- print(msg.format(style), file=sys.stderr) - builtins.__xonsh__.env["XONSH_COLOR_STYLE"] = "default" - cmap = ANSI_STYLES["default"] - esc = ("\001" if hide else "") + "\033[" - m = "m" + ("\002" if hide else "") - bopen = "{" - bclose = "}" - colon = ":" - expl = "!" - toks = [] - for literal, field, spec, conv in FORMATTER.parse(template): - toks.append(literal) - if field is None: - pass - elif field in cmap: - toks.extend([esc, cmap[field], m]) - elif "#" in field: - field = field.lower() - pre, _, post = field.partition("#") - f_or_b = "38" if RE_BACKGROUND.search(pre) is None else "48" - rgb, _, post = post.partition("_") - c256, _ = rgb_to_256(rgb) - color = f_or_b + ";5;" + c256 - mods = pre + "_" + post - if "underline" in mods: - color = "4;" + color - if "bold" in mods: - color = "1;" + color - toks.extend([esc, color, m]) - elif field is not None: - toks.append(bopen) - toks.append(field) - if conv is not None and len(conv) > 0: - toks.append(expl) - toks.append(conv) - if spec is not None and len(spec) > 0: - toks.append(colon) - toks.append(spec) - toks.append(bclose) - return "".join(toks) - - -def ansi_color_style_names(): - """Returns an iterable of all ANSI color style names.""" - return ANSI_STYLES.keys() - - -def ansi_color_style(style="default"): - """Returns the current color map.""" - if style in ANSI_STYLES: - cmap = ANSI_STYLES[style] - else: - msg = "Could not find color style {0!r}, using default.".format(style) - warnings.warn(msg, RuntimeWarning) - cmap = ANSI_STYLES["default"] - return cmap - - -def ansi_reverse_style(style="default", return_style=False): - """Reverses an ANSI color style mapping so that escape codes map to - colors. Style may either be string or mapping. May also return - the style it looked up. - """ - style = ansi_style_by_name(style) if isinstance(style, str) else style - reversed_style = {v: k for k, v in style.items()} - # add keys to make this more useful - updates = { - "1": "BOLD_", - "2": "FAINT_", - "4": "UNDERLINE_", - "5": "SLOWBLINK_", - "1;4": "BOLD_UNDERLINE_", - "4;1": "BOLD_UNDERLINE_", - "38": "SET_FOREGROUND_", - "48": "SET_BACKGROUND_", - "38;2": "SET_FOREGROUND_3INTS_", - "48;2": "SET_BACKGROUND_3INTS_", - "38;5": "SET_FOREGROUND_SHORT_", - "48;5": "SET_BACKGROUND_SHORT_", - } - for ec, name in reversed_style.items(): - no_left_zero = ec.lstrip("0") - if no_left_zero.startswith(";"): - updates[no_left_zero[1:]] = name - elif no_left_zero != ec: - updates[no_left_zero] = name - reversed_style.update(updates) - # return results - if return_style: - return style, reversed_style - else: - return reversed_style - - -@lazyobject -def ANSI_ESCAPE_CODE_RE(): - return re.compile(r"\001?(\033\[)?([0-9;]+)m?\002?") - - -@lazyobject -def ANSI_REVERSE_COLOR_NAME_TRANSLATIONS(): - base = { - "SET_FOREGROUND_FAINT_": "SET_FOREGROUND_3INTS_", - "SET_BACKGROUND_FAINT_": "SET_BACKGROUND_3INTS_", - "SET_FOREGROUND_SLOWBLINK_": "SET_FOREGROUND_SHORT_", - "SET_BACKGROUND_SLOWBLINK_": "SET_BACKGROUND_SHORT_", - } - data = {"UNDERLINE_BOLD_": "BOLD_UNDERLINE_"} - data.update(base) - data.update({"BOLD_" + k: "BOLD_" + v for k, v in base.items()}) - data.update({"UNDERLINE_" + k: "UNDERLINE_" + v for k, v in base.items()}) - data.update({"BOLD_UNDERLINE_" + k: "BOLD_UNDERLINE_" + v for k, v in base.items()}) - data.update({"UNDERLINE_BOLD_" + k: "BOLD_UNDERLINE_" + v for k, v in base.items()}) - return data - - -@lazyobject -def ANSI_COLOR_NAME_SET_3INTS_RE(): - return re.compile(r"(\w+_)?SET_(FORE|BACK)GROUND_3INTS_(\d+)_(\d+)_(\d+)") - - 
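# [Editor's aside -- not part of the patch.] The reverse-style tables above
# undo SGR escape sequences: a code such as "\033[1;31m" is "ESC[" plus the
# parameter string "1;31" (bold, red foreground) plus "m". A minimal,
# runnable sketch of the lookup the deleted module performs; the regex
# mirrors ANSI_ESCAPE_CODE_RE above, while the two-entry reverse map is
# illustrative only:
import re

SGR_RE = re.compile(r"\001?(\033\[)?([0-9;]+)m?\002?")
REVERSE = {"0": "NO_COLOR", "0;32": "GREEN"}

def sgr_to_name(escape_code):
    # Strip "ESC[" and the trailing "m", then look up the parameter string;
    # unknown parameter strings fall through unchanged.
    params = SGR_RE.match(escape_code).group(2)
    return REVERSE.get(params, params)

assert sgr_to_name("\033[0;32m") == "GREEN"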
-@lazyobject -def ANSI_COLOR_NAME_SET_SHORT_RE(): - return re.compile(r"(\w+_)?SET_(FORE|BACK)GROUND_SHORT_(\d+)") - - -def _color_name_from_ints(ints, background=False, prefix=None): - name = find_closest_color(ints, BASE_XONSH_COLORS) - if background: - name = "BACKGROUND_" + name - name = name if prefix is None else prefix + name - return name - - -_ANSI_COLOR_ESCAPE_CODE_TO_NAME_CACHE = {} - - -def ansi_color_escape_code_to_name(escape_code, style, reversed_style=None): - """Converts an ASNI color code escape sequence to a tuple of color names - in the provided style ('default' should almost be the style). For example, - '0' becomes ('NO_COLOR',) and '32;41' becomes ('GREEN', 'BACKGROUND_RED'). - The style keyword may either be a string, in which the style is looked up, - or an actual style dict. You can also provide a reversed style mapping, - too, which is just the keys/values of the style dict swapped. If reversed - style is not provided, it is computed. - """ - key = (escape_code, style) - if key in _ANSI_COLOR_ESCAPE_CODE_TO_NAME_CACHE: - return _ANSI_COLOR_ESCAPE_CODE_TO_NAME_CACHE[key] - if reversed_style is None: - style, reversed_style = ansi_reverse_style(style, return_style=True) - # strip some actual escape codes, if needed. - ec = ANSI_ESCAPE_CODE_RE.match(escape_code).group(2) - names = [] - n_ints = 0 - seen_set_foreback = False - for e in ec.split(";"): - no_left_zero = e.lstrip("0") if len(e) > 1 else e - if seen_set_foreback and n_ints > 0: - names.append(e) - n_ints -= 1 - if n_ints == 0: - seen_set_foreback = False - continue - else: - names.append(reversed_style.get(no_left_zero, no_left_zero)) - # set the flags for next time - if "38" == e or "48" == e: - seen_set_foreback = True - elif "2" == e: - n_ints = 3 - elif "5" == e: - n_ints = 1 - # normalize names - n = "" - norm_names = [] - colors = set(reversed_style.values()) - for name in names: - if name == "NO_COLOR": - # skip most '0' entries - continue - n = n + name if n else name - n = ANSI_REVERSE_COLOR_NAME_TRANSLATIONS.get(n, n) - if n.endswith("_"): - continue - elif ANSI_COLOR_NAME_SET_SHORT_RE.match(n) is not None: - pre, fore_back, short = ANSI_COLOR_NAME_SET_SHORT_RE.match(n).groups() - n = _color_name_from_ints( - short_to_ints(short), background=(fore_back == "BACK"), prefix=pre - ) - elif ANSI_COLOR_NAME_SET_3INTS_RE.match(n) is not None: - pre, fore_back, r, g, b = ANSI_COLOR_NAME_SET_3INTS_RE.match(n).groups() - n = _color_name_from_ints( - (int(r), int(g), int(b)), background=(fore_back == "BACK"), prefix=pre - ) - elif "GROUND_3INTS_" in n: - # have 1 or 2, but not 3 ints - n += "_" - continue - # error check - if n not in colors: - msg = ( - "Could not translate ANSI color code {escape_code!r} " - "into a known color in the palette. Specifically, the {n!r} " - "portion of {name!r} in {names!r} seems to missing." 
- ) - raise ValueError( - msg.format(escape_code=escape_code, names=names, name=name, n=n) - ) - norm_names.append(n) - n = "" - # return - if len(norm_names) == 0: - return ("NO_COLOR",) - else: - return tuple(norm_names) - - -def _ansi_expand_style(cmap): - """Expands a style in order to more quickly make color map changes.""" - for key, val in list(cmap.items()): - if key == "NO_COLOR": - continue - elif len(val) == 0: - cmap["BOLD_" + key] = "1" - cmap["UNDERLINE_" + key] = "4" - cmap["BOLD_UNDERLINE_" + key] = "1;4" - cmap["BACKGROUND_" + key] = val - else: - cmap["BOLD_" + key] = "1;" + val - cmap["UNDERLINE_" + key] = "4;" + val - cmap["BOLD_UNDERLINE_" + key] = "1;4;" + val - cmap["BACKGROUND_" + key] = val.replace("38", "48", 1) - - -def _bw_style(): - style = { - "BLACK": "0;30", - "BLUE": "0;37", - "CYAN": "0;37", - "GREEN": "0;37", - "INTENSE_BLACK": "0;90", - "INTENSE_BLUE": "0;97", - "INTENSE_CYAN": "0;97", - "INTENSE_GREEN": "0;97", - "INTENSE_PURPLE": "0;97", - "INTENSE_RED": "0;97", - "INTENSE_WHITE": "0;97", - "INTENSE_YELLOW": "0;97", - "NO_COLOR": "0", - "PURPLE": "0;37", - "RED": "0;37", - "WHITE": "0;37", - "YELLOW": "0;37", - } - _ansi_expand_style(style) - return style - - -def _default_style(): - style = { - # Reset - "NO_COLOR": "0", # Text Reset - # Regular Colors - "BLACK": "0;30", # BLACK - "RED": "0;31", # RED - "GREEN": "0;32", # GREEN - "YELLOW": "0;33", # YELLOW - "BLUE": "0;34", # BLUE - "PURPLE": "0;35", # PURPLE - "CYAN": "0;36", # CYAN - "WHITE": "0;37", # WHITE - # Bold - "BOLD_BLACK": "1;30", # BLACK - "BOLD_RED": "1;31", # RED - "BOLD_GREEN": "1;32", # GREEN - "BOLD_YELLOW": "1;33", # YELLOW - "BOLD_BLUE": "1;34", # BLUE - "BOLD_PURPLE": "1;35", # PURPLE - "BOLD_CYAN": "1;36", # CYAN - "BOLD_WHITE": "1;37", # WHITE - # Underline - "UNDERLINE_BLACK": "4;30", # BLACK - "UNDERLINE_RED": "4;31", # RED - "UNDERLINE_GREEN": "4;32", # GREEN - "UNDERLINE_YELLOW": "4;33", # YELLOW - "UNDERLINE_BLUE": "4;34", # BLUE - "UNDERLINE_PURPLE": "4;35", # PURPLE - "UNDERLINE_CYAN": "4;36", # CYAN - "UNDERLINE_WHITE": "4;37", # WHITE - # Bold, Underline - "BOLD_UNDERLINE_BLACK": "1;4;30", # BLACK - "BOLD_UNDERLINE_RED": "1;4;31", # RED - "BOLD_UNDERLINE_GREEN": "1;4;32", # GREEN - "BOLD_UNDERLINE_YELLOW": "1;4;33", # YELLOW - "BOLD_UNDERLINE_BLUE": "1;4;34", # BLUE - "BOLD_UNDERLINE_PURPLE": "1;4;35", # PURPLE - "BOLD_UNDERLINE_CYAN": "1;4;36", # CYAN - "BOLD_UNDERLINE_WHITE": "1;4;37", # WHITE - # Background - "BACKGROUND_BLACK": "40", # BLACK - "BACKGROUND_RED": "41", # RED - "BACKGROUND_GREEN": "42", # GREEN - "BACKGROUND_YELLOW": "43", # YELLOW - "BACKGROUND_BLUE": "44", # BLUE - "BACKGROUND_PURPLE": "45", # PURPLE - "BACKGROUND_CYAN": "46", # CYAN - "BACKGROUND_WHITE": "47", # WHITE - # High Intensity - "INTENSE_BLACK": "0;90", # BLACK - "INTENSE_RED": "0;91", # RED - "INTENSE_GREEN": "0;92", # GREEN - "INTENSE_YELLOW": "0;93", # YELLOW - "INTENSE_BLUE": "0;94", # BLUE - "INTENSE_PURPLE": "0;95", # PURPLE - "INTENSE_CYAN": "0;96", # CYAN - "INTENSE_WHITE": "0;97", # WHITE - # Bold High Intensity - "BOLD_INTENSE_BLACK": "1;90", # BLACK - "BOLD_INTENSE_RED": "1;91", # RED - "BOLD_INTENSE_GREEN": "1;92", # GREEN - "BOLD_INTENSE_YELLOW": "1;93", # YELLOW - "BOLD_INTENSE_BLUE": "1;94", # BLUE - "BOLD_INTENSE_PURPLE": "1;95", # PURPLE - "BOLD_INTENSE_CYAN": "1;96", # CYAN - "BOLD_INTENSE_WHITE": "1;97", # WHITE - # Underline High Intensity - "UNDERLINE_INTENSE_BLACK": "4;90", # BLACK - "UNDERLINE_INTENSE_RED": "4;91", # RED - "UNDERLINE_INTENSE_GREEN": "4;92", # GREEN - 
"UNDERLINE_INTENSE_YELLOW": "4;93", # YELLOW - "UNDERLINE_INTENSE_BLUE": "4;94", # BLUE - "UNDERLINE_INTENSE_PURPLE": "4;95", # PURPLE - "UNDERLINE_INTENSE_CYAN": "4;96", # CYAN - "UNDERLINE_INTENSE_WHITE": "4;97", # WHITE - # Bold Underline High Intensity - "BOLD_UNDERLINE_INTENSE_BLACK": "1;4;90", # BLACK - "BOLD_UNDERLINE_INTENSE_RED": "1;4;91", # RED - "BOLD_UNDERLINE_INTENSE_GREEN": "1;4;92", # GREEN - "BOLD_UNDERLINE_INTENSE_YELLOW": "1;4;93", # YELLOW - "BOLD_UNDERLINE_INTENSE_BLUE": "1;4;94", # BLUE - "BOLD_UNDERLINE_INTENSE_PURPLE": "1;4;95", # PURPLE - "BOLD_UNDERLINE_INTENSE_CYAN": "1;4;96", # CYAN - "BOLD_UNDERLINE_INTENSE_WHITE": "1;4;97", # WHITE - # High Intensity backgrounds - "BACKGROUND_INTENSE_BLACK": "0;100", # BLACK - "BACKGROUND_INTENSE_RED": "0;101", # RED - "BACKGROUND_INTENSE_GREEN": "0;102", # GREEN - "BACKGROUND_INTENSE_YELLOW": "0;103", # YELLOW - "BACKGROUND_INTENSE_BLUE": "0;104", # BLUE - "BACKGROUND_INTENSE_PURPLE": "0;105", # PURPLE - "BACKGROUND_INTENSE_CYAN": "0;106", # CYAN - "BACKGROUND_INTENSE_WHITE": "0;107", # WHITE - } - return style - - -def _monokai_style(): - style = { - "NO_COLOR": "0", - "BLACK": "38;5;16", - "BLUE": "38;5;63", - "CYAN": "38;5;81", - "GREEN": "38;5;40", - "PURPLE": "38;5;89", - "RED": "38;5;124", - "WHITE": "38;5;188", - "YELLOW": "38;5;184", - "INTENSE_BLACK": "38;5;59", - "INTENSE_BLUE": "38;5;20", - "INTENSE_CYAN": "38;5;44", - "INTENSE_GREEN": "38;5;148", - "INTENSE_PURPLE": "38;5;141", - "INTENSE_RED": "38;5;197", - "INTENSE_WHITE": "38;5;15", - "INTENSE_YELLOW": "38;5;186", - } - _ansi_expand_style(style) - return style - - -#################################### -# Auto-generated below this line # -#################################### - - -def _algol_style(): - style = { - "BLACK": "38;5;59", - "BLUE": "38;5;59", - "CYAN": "38;5;59", - "GREEN": "38;5;59", - "INTENSE_BLACK": "38;5;59", - "INTENSE_BLUE": "38;5;102", - "INTENSE_CYAN": "38;5;102", - "INTENSE_GREEN": "38;5;102", - "INTENSE_PURPLE": "38;5;102", - "INTENSE_RED": "38;5;09", - "INTENSE_WHITE": "38;5;102", - "INTENSE_YELLOW": "38;5;102", - "NO_COLOR": "0", - "PURPLE": "38;5;59", - "RED": "38;5;09", - "WHITE": "38;5;102", - "YELLOW": "38;5;09", - } - _ansi_expand_style(style) - return style - - -def _algol_nu_style(): - style = { - "BLACK": "38;5;59", - "BLUE": "38;5;59", - "CYAN": "38;5;59", - "GREEN": "38;5;59", - "INTENSE_BLACK": "38;5;59", - "INTENSE_BLUE": "38;5;102", - "INTENSE_CYAN": "38;5;102", - "INTENSE_GREEN": "38;5;102", - "INTENSE_PURPLE": "38;5;102", - "INTENSE_RED": "38;5;09", - "INTENSE_WHITE": "38;5;102", - "INTENSE_YELLOW": "38;5;102", - "NO_COLOR": "0", - "PURPLE": "38;5;59", - "RED": "38;5;09", - "WHITE": "38;5;102", - "YELLOW": "38;5;09", - } - _ansi_expand_style(style) - return style - - -def _autumn_style(): - style = { - "BLACK": "38;5;18", - "BLUE": "38;5;19", - "CYAN": "38;5;37", - "GREEN": "38;5;34", - "INTENSE_BLACK": "38;5;59", - "INTENSE_BLUE": "38;5;33", - "INTENSE_CYAN": "38;5;33", - "INTENSE_GREEN": "38;5;64", - "INTENSE_PURPLE": "38;5;217", - "INTENSE_RED": "38;5;130", - "INTENSE_WHITE": "38;5;145", - "INTENSE_YELLOW": "38;5;217", - "NO_COLOR": "0", - "PURPLE": "38;5;90", - "RED": "38;5;124", - "WHITE": "38;5;145", - "YELLOW": "38;5;130", - } - _ansi_expand_style(style) - return style - - -def _borland_style(): - style = { - "BLACK": "38;5;16", - "BLUE": "38;5;18", - "CYAN": "38;5;30", - "GREEN": "38;5;28", - "INTENSE_BLACK": "38;5;59", - "INTENSE_BLUE": "38;5;21", - "INTENSE_CYAN": "38;5;194", - "INTENSE_GREEN": "38;5;102", - 
"INTENSE_PURPLE": "38;5;188", - "INTENSE_RED": "38;5;09", - "INTENSE_WHITE": "38;5;224", - "INTENSE_YELLOW": "38;5;188", - "NO_COLOR": "0", - "PURPLE": "38;5;90", - "RED": "38;5;124", - "WHITE": "38;5;145", - "YELLOW": "38;5;124", - } - _ansi_expand_style(style) - return style - - -def _colorful_style(): - style = { - "BLACK": "38;5;16", - "BLUE": "38;5;20", - "CYAN": "38;5;31", - "GREEN": "38;5;34", - "INTENSE_BLACK": "38;5;59", - "INTENSE_BLUE": "38;5;61", - "INTENSE_CYAN": "38;5;145", - "INTENSE_GREEN": "38;5;102", - "INTENSE_PURPLE": "38;5;217", - "INTENSE_RED": "38;5;166", - "INTENSE_WHITE": "38;5;15", - "INTENSE_YELLOW": "38;5;217", - "NO_COLOR": "0", - "PURPLE": "38;5;90", - "RED": "38;5;124", - "WHITE": "38;5;145", - "YELLOW": "38;5;130", - } - _ansi_expand_style(style) - return style - - -def _emacs_style(): - style = { - "BLACK": "38;5;28", - "BLUE": "38;5;18", - "CYAN": "38;5;26", - "GREEN": "38;5;34", - "INTENSE_BLACK": "38;5;59", - "INTENSE_BLUE": "38;5;26", - "INTENSE_CYAN": "38;5;145", - "INTENSE_GREEN": "38;5;34", - "INTENSE_PURPLE": "38;5;129", - "INTENSE_RED": "38;5;167", - "INTENSE_WHITE": "38;5;145", - "INTENSE_YELLOW": "38;5;145", - "NO_COLOR": "0", - "PURPLE": "38;5;90", - "RED": "38;5;124", - "WHITE": "38;5;145", - "YELLOW": "38;5;130", - } - _ansi_expand_style(style) - return style - - -def _friendly_style(): - style = { - "BLACK": "38;5;22", - "BLUE": "38;5;18", - "CYAN": "38;5;31", - "GREEN": "38;5;34", - "INTENSE_BLACK": "38;5;59", - "INTENSE_BLUE": "38;5;74", - "INTENSE_CYAN": "38;5;74", - "INTENSE_GREEN": "38;5;71", - "INTENSE_PURPLE": "38;5;134", - "INTENSE_RED": "38;5;167", - "INTENSE_WHITE": "38;5;15", - "INTENSE_YELLOW": "38;5;145", - "NO_COLOR": "0", - "PURPLE": "38;5;90", - "RED": "38;5;124", - "WHITE": "38;5;145", - "YELLOW": "38;5;166", - } - _ansi_expand_style(style) - return style - - -def _fruity_style(): - style = { - "BLACK": "38;5;16", - "BLUE": "38;5;32", - "CYAN": "38;5;32", - "GREEN": "38;5;28", - "INTENSE_BLACK": "38;5;59", - "INTENSE_BLUE": "38;5;33", - "INTENSE_CYAN": "38;5;33", - "INTENSE_GREEN": "38;5;102", - "INTENSE_PURPLE": "38;5;198", - "INTENSE_RED": "38;5;202", - "INTENSE_WHITE": "38;5;15", - "INTENSE_YELLOW": "38;5;187", - "NO_COLOR": "0", - "PURPLE": "38;5;198", - "RED": "38;5;09", - "WHITE": "38;5;187", - "YELLOW": "38;5;202", - } - _ansi_expand_style(style) - return style - - -def _igor_style(): - style = { - "BLACK": "38;5;34", - "BLUE": "38;5;21", - "CYAN": "38;5;30", - "GREEN": "38;5;34", - "INTENSE_BLACK": "38;5;30", - "INTENSE_BLUE": "38;5;21", - "INTENSE_CYAN": "38;5;30", - "INTENSE_GREEN": "38;5;34", - "INTENSE_PURPLE": "38;5;163", - "INTENSE_RED": "38;5;166", - "INTENSE_WHITE": "38;5;163", - "INTENSE_YELLOW": "38;5;166", - "NO_COLOR": "0", - "PURPLE": "38;5;163", - "RED": "38;5;166", - "WHITE": "38;5;163", - "YELLOW": "38;5;166", - } - _ansi_expand_style(style) - return style - - -def _lovelace_style(): - style = { - "BLACK": "38;5;59", - "BLUE": "38;5;25", - "CYAN": "38;5;29", - "GREEN": "38;5;65", - "INTENSE_BLACK": "38;5;59", - "INTENSE_BLUE": "38;5;25", - "INTENSE_CYAN": "38;5;102", - "INTENSE_GREEN": "38;5;29", - "INTENSE_PURPLE": "38;5;133", - "INTENSE_RED": "38;5;131", - "INTENSE_WHITE": "38;5;102", - "INTENSE_YELLOW": "38;5;136", - "NO_COLOR": "0", - "PURPLE": "38;5;133", - "RED": "38;5;124", - "WHITE": "38;5;102", - "YELLOW": "38;5;130", - } - _ansi_expand_style(style) - return style - - -def _manni_style(): - style = { - "BLACK": "38;5;16", - "BLUE": "38;5;18", - "CYAN": "38;5;30", - "GREEN": "38;5;40", - 
"INTENSE_BLACK": "38;5;59", - "INTENSE_BLUE": "38;5;105", - "INTENSE_CYAN": "38;5;45", - "INTENSE_GREEN": "38;5;113", - "INTENSE_PURPLE": "38;5;165", - "INTENSE_RED": "38;5;202", - "INTENSE_WHITE": "38;5;224", - "INTENSE_YELLOW": "38;5;221", - "NO_COLOR": "0", - "PURPLE": "38;5;165", - "RED": "38;5;124", - "WHITE": "38;5;145", - "YELLOW": "38;5;166", - } - _ansi_expand_style(style) - return style - - -def _murphy_style(): - style = { - "BLACK": "38;5;16", - "BLUE": "38;5;18", - "CYAN": "38;5;31", - "GREEN": "38;5;34", - "INTENSE_BLACK": "38;5;59", - "INTENSE_BLUE": "38;5;63", - "INTENSE_CYAN": "38;5;86", - "INTENSE_GREEN": "38;5;86", - "INTENSE_PURPLE": "38;5;213", - "INTENSE_RED": "38;5;209", - "INTENSE_WHITE": "38;5;15", - "INTENSE_YELLOW": "38;5;222", - "NO_COLOR": "0", - "PURPLE": "38;5;90", - "RED": "38;5;124", - "WHITE": "38;5;145", - "YELLOW": "38;5;166", - } - _ansi_expand_style(style) - return style - - -def _native_style(): - style = { - "BLACK": "38;5;52", - "BLUE": "38;5;67", - "CYAN": "38;5;31", - "GREEN": "38;5;64", - "INTENSE_BLACK": "38;5;59", - "INTENSE_BLUE": "38;5;68", - "INTENSE_CYAN": "38;5;87", - "INTENSE_GREEN": "38;5;70", - "INTENSE_PURPLE": "38;5;188", - "INTENSE_RED": "38;5;160", - "INTENSE_WHITE": "38;5;15", - "INTENSE_YELLOW": "38;5;214", - "NO_COLOR": "0", - "PURPLE": "38;5;59", - "RED": "38;5;124", - "WHITE": "38;5;145", - "YELLOW": "38;5;124", - } - _ansi_expand_style(style) - return style - - -def _paraiso_dark_style(): - style = { - "BLACK": "38;5;95", - "BLUE": "38;5;97", - "CYAN": "38;5;39", - "GREEN": "38;5;72", - "INTENSE_BLACK": "38;5;95", - "INTENSE_BLUE": "38;5;97", - "INTENSE_CYAN": "38;5;79", - "INTENSE_GREEN": "38;5;72", - "INTENSE_PURPLE": "38;5;188", - "INTENSE_RED": "38;5;203", - "INTENSE_WHITE": "38;5;188", - "INTENSE_YELLOW": "38;5;220", - "NO_COLOR": "0", - "PURPLE": "38;5;97", - "RED": "38;5;203", - "WHITE": "38;5;79", - "YELLOW": "38;5;214", - } - _ansi_expand_style(style) - return style - - -def _paraiso_light_style(): - style = { - "BLACK": "38;5;16", - "BLUE": "38;5;16", - "CYAN": "38;5;39", - "GREEN": "38;5;72", - "INTENSE_BLACK": "38;5;16", - "INTENSE_BLUE": "38;5;97", - "INTENSE_CYAN": "38;5;79", - "INTENSE_GREEN": "38;5;72", - "INTENSE_PURPLE": "38;5;97", - "INTENSE_RED": "38;5;203", - "INTENSE_WHITE": "38;5;79", - "INTENSE_YELLOW": "38;5;220", - "NO_COLOR": "0", - "PURPLE": "38;5;97", - "RED": "38;5;16", - "WHITE": "38;5;102", - "YELLOW": "38;5;214", - } - _ansi_expand_style(style) - return style - - -def _pastie_style(): - style = { - "BLACK": "38;5;16", - "BLUE": "38;5;20", - "CYAN": "38;5;25", - "GREEN": "38;5;28", - "INTENSE_BLACK": "38;5;59", - "INTENSE_BLUE": "38;5;61", - "INTENSE_CYAN": "38;5;194", - "INTENSE_GREEN": "38;5;34", - "INTENSE_PURPLE": "38;5;188", - "INTENSE_RED": "38;5;172", - "INTENSE_WHITE": "38;5;15", - "INTENSE_YELLOW": "38;5;188", - "NO_COLOR": "0", - "PURPLE": "38;5;125", - "RED": "38;5;124", - "WHITE": "38;5;145", - "YELLOW": "38;5;130", - } - _ansi_expand_style(style) - return style - - -def _perldoc_style(): - style = { - "BLACK": "38;5;18", - "BLUE": "38;5;18", - "CYAN": "38;5;31", - "GREEN": "38;5;34", - "INTENSE_BLACK": "38;5;59", - "INTENSE_BLUE": "38;5;134", - "INTENSE_CYAN": "38;5;145", - "INTENSE_GREEN": "38;5;28", - "INTENSE_PURPLE": "38;5;134", - "INTENSE_RED": "38;5;167", - "INTENSE_WHITE": "38;5;188", - "INTENSE_YELLOW": "38;5;188", - "NO_COLOR": "0", - "PURPLE": "38;5;90", - "RED": "38;5;124", - "WHITE": "38;5;145", - "YELLOW": "38;5;166", - } - _ansi_expand_style(style) - return style - - 
-def _rrt_style(): - style = { - "BLACK": "38;5;09", - "BLUE": "38;5;117", - "CYAN": "38;5;117", - "GREEN": "38;5;46", - "INTENSE_BLACK": "38;5;117", - "INTENSE_BLUE": "38;5;117", - "INTENSE_CYAN": "38;5;122", - "INTENSE_GREEN": "38;5;46", - "INTENSE_PURPLE": "38;5;213", - "INTENSE_RED": "38;5;09", - "INTENSE_WHITE": "38;5;188", - "INTENSE_YELLOW": "38;5;222", - "NO_COLOR": "0", - "PURPLE": "38;5;213", - "RED": "38;5;09", - "WHITE": "38;5;117", - "YELLOW": "38;5;09", - } - _ansi_expand_style(style) - return style - - -def _tango_style(): - style = { - "BLACK": "38;5;16", - "BLUE": "38;5;20", - "CYAN": "38;5;61", - "GREEN": "38;5;34", - "INTENSE_BLACK": "38;5;24", - "INTENSE_BLUE": "38;5;62", - "INTENSE_CYAN": "38;5;15", - "INTENSE_GREEN": "38;5;64", - "INTENSE_PURPLE": "38;5;15", - "INTENSE_RED": "38;5;09", - "INTENSE_WHITE": "38;5;15", - "INTENSE_YELLOW": "38;5;178", - "NO_COLOR": "0", - "PURPLE": "38;5;90", - "RED": "38;5;124", - "WHITE": "38;5;15", - "YELLOW": "38;5;94", - } - _ansi_expand_style(style) - return style - - -def _trac_style(): - style = { - "BLACK": "38;5;16", - "BLUE": "38;5;18", - "CYAN": "38;5;30", - "GREEN": "38;5;100", - "INTENSE_BLACK": "38;5;59", - "INTENSE_BLUE": "38;5;60", - "INTENSE_CYAN": "38;5;194", - "INTENSE_GREEN": "38;5;102", - "INTENSE_PURPLE": "38;5;188", - "INTENSE_RED": "38;5;137", - "INTENSE_WHITE": "38;5;224", - "INTENSE_YELLOW": "38;5;188", - "NO_COLOR": "0", - "PURPLE": "38;5;90", - "RED": "38;5;124", - "WHITE": "38;5;145", - "YELLOW": "38;5;100", - } - _ansi_expand_style(style) - return style - - -def _vim_style(): - style = { - "BLACK": "38;5;18", - "BLUE": "38;5;18", - "CYAN": "38;5;44", - "GREEN": "38;5;40", - "INTENSE_BLACK": "38;5;60", - "INTENSE_BLUE": "38;5;68", - "INTENSE_CYAN": "38;5;44", - "INTENSE_GREEN": "38;5;40", - "INTENSE_PURPLE": "38;5;164", - "INTENSE_RED": "38;5;09", - "INTENSE_WHITE": "38;5;188", - "INTENSE_YELLOW": "38;5;184", - "NO_COLOR": "0", - "PURPLE": "38;5;164", - "RED": "38;5;160", - "WHITE": "38;5;188", - "YELLOW": "38;5;160", - } - _ansi_expand_style(style) - return style - - -def _vs_style(): - style = { - "BLACK": "38;5;28", - "BLUE": "38;5;21", - "CYAN": "38;5;31", - "GREEN": "38;5;28", - "INTENSE_BLACK": "38;5;31", - "INTENSE_BLUE": "38;5;31", - "INTENSE_CYAN": "38;5;31", - "INTENSE_GREEN": "38;5;31", - "INTENSE_PURPLE": "38;5;31", - "INTENSE_RED": "38;5;09", - "INTENSE_WHITE": "38;5;31", - "INTENSE_YELLOW": "38;5;31", - "NO_COLOR": "0", - "PURPLE": "38;5;124", - "RED": "38;5;124", - "WHITE": "38;5;31", - "YELLOW": "38;5;124", - } - _ansi_expand_style(style) - return style - - -def _xcode_style(): - style = { - "BLACK": "38;5;16", - "BLUE": "38;5;20", - "CYAN": "38;5;60", - "GREEN": "38;5;28", - "INTENSE_BLACK": "38;5;60", - "INTENSE_BLUE": "38;5;20", - "INTENSE_CYAN": "38;5;60", - "INTENSE_GREEN": "38;5;60", - "INTENSE_PURPLE": "38;5;126", - "INTENSE_RED": "38;5;160", - "INTENSE_WHITE": "38;5;60", - "INTENSE_YELLOW": "38;5;94", - "NO_COLOR": "0", - "PURPLE": "38;5;126", - "RED": "38;5;160", - "WHITE": "38;5;60", - "YELLOW": "38;5;94", - } - _ansi_expand_style(style) - return style - - -ANSI_STYLES = LazyDict( - { - "algol": _algol_style, - "algol_nu": _algol_nu_style, - "autumn": _autumn_style, - "borland": _borland_style, - "bw": _bw_style, - "colorful": _colorful_style, - "default": _default_style, - "emacs": _emacs_style, - "friendly": _friendly_style, - "fruity": _fruity_style, - "igor": _igor_style, - "lovelace": _lovelace_style, - "manni": _manni_style, - "monokai": _monokai_style, - "murphy": 
_murphy_style, - "native": _native_style, - "paraiso-dark": _paraiso_dark_style, - "paraiso-light": _paraiso_light_style, - "pastie": _pastie_style, - "perldoc": _perldoc_style, - "rrt": _rrt_style, - "tango": _tango_style, - "trac": _trac_style, - "vim": _vim_style, - "vs": _vs_style, - "xcode": _xcode_style, - }, - globals(), - "ANSI_STYLES", -) - -del ( - _algol_style, - _algol_nu_style, - _autumn_style, - _borland_style, - _bw_style, - _colorful_style, - _default_style, - _emacs_style, - _friendly_style, - _fruity_style, - _igor_style, - _lovelace_style, - _manni_style, - _monokai_style, - _murphy_style, - _native_style, - _paraiso_dark_style, - _paraiso_light_style, - _pastie_style, - _perldoc_style, - _rrt_style, - _tango_style, - _trac_style, - _vim_style, - _vs_style, - _xcode_style, -) - - -# -# Dynamically generated styles -# -def make_ansi_style(palette): - """Makes an ANSI color style from a color palette""" - style = {"NO_COLOR": "0"} - for name, t in BASE_XONSH_COLORS.items(): - closest = find_closest_color(t, palette) - if len(closest) == 3: - closest = "".join([a * 2 for a in closest]) - short = rgb2short(closest)[0] - style[name] = "38;5;" + short - style["BOLD_" + name] = "1;38;5;" + short - style["UNDERLINE_" + name] = "4;38;5;" + short - style["BOLD_UNDERLINE_" + name] = "1;4;38;5;" + short - style["BACKGROUND_" + name] = "48;5;" + short - return style - - -def ansi_style_by_name(name): - """Gets or makes an ANSI color style by name. If the styles does not - exist, it will look for a style using the pygments name. - """ - if name in ANSI_STYLES: - return ANSI_STYLES[name] - elif not HAS_PYGMENTS: - raise KeyError("could not find style {0!r}".format(name)) - from xonsh.pygments_cache import get_style_by_name - - pstyle = get_style_by_name(name) - palette = make_palette(pstyle.styles.values()) - astyle = make_ansi_style(palette) - ANSI_STYLES[name] = astyle - return astyle diff --git a/xonsh/ast.py b/xonsh/ast.py index 021e774..f38d535 100644 --- a/xonsh/ast.py +++ b/xonsh/ast.py @@ -1,155 +1,27 @@ -# -*- coding: utf-8 -*- """The xonsh abstract syntax tree node.""" -# These are imported into our module namespace for the benefit of parser.py. 
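# [Editor's aside -- not part of the patch.] The comment above describes a
# re-export pattern: xonsh.ast imports the stdlib ast node classes into its
# own namespace so that parser.py has a single, version-insulated import
# site. A minimal sketch of the idea (module names are illustrative):
#
#     # nodes.py -- re-export, plus a compatibility shim
#     from ast import Name, Call, Attribute   # noqa: F401
#     try:
#         from ast import MatMult             # Python 3.5+
#     except ImportError:
#         MatMult = None
#
#     # parser.py -- consumers import from one stable place
#     from nodes import Name, Call, MatMult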
-# pylint: disable=unused-import -import sys -import builtins -from ast import ( - Module, - Num, - Expr, - Str, - Bytes, - UnaryOp, - UAdd, - USub, - Invert, - BinOp, - Add, - Sub, - Mult, - Div, - FloorDiv, - Mod, - Pow, - Compare, - Lt, - Gt, - LtE, - GtE, - Eq, - NotEq, - In, - NotIn, - Is, - IsNot, - Not, - BoolOp, - Or, - And, - Subscript, - Load, - Slice, - ExtSlice, - List, - Tuple, - Set, - Dict, - AST, - NameConstant, - Name, - GeneratorExp, - Store, - comprehension, - ListComp, - SetComp, - DictComp, - Assign, - AugAssign, - BitXor, - BitAnd, - BitOr, - LShift, - RShift, - Assert, - Delete, - Del, - Pass, - Raise, - Import, - alias, - ImportFrom, - Continue, - Break, - Yield, - YieldFrom, - Return, - IfExp, - Lambda, - arguments, - arg, - Call, - keyword, - Attribute, - Global, - Nonlocal, - If, - While, - For, - withitem, - With, - Try, - ExceptHandler, - FunctionDef, - ClassDef, - Starred, - NodeTransformer, - Interactive, - Expression, - Index, - literal_eval, - dump, - walk, - increment_lineno, -) -from ast import Ellipsis as EllipsisNode - -# pylint: enable=unused-import -import textwrap -import itertools - -from xonsh.tools import subproc_toks, find_next_break, get_logical_line -from xonsh.platform import PYTHON_VERSION_INFO - -if PYTHON_VERSION_INFO >= (3, 5, 0): - # pylint: disable=unused-import - # pylint: disable=no-name-in-module +from ast import Module, Num, Expr, Str, Bytes, UnaryOp, UAdd, USub, Invert, \ + BinOp, Add, Sub, Mult, Div, FloorDiv, Mod, Pow, Compare, Lt, Gt, \ + LtE, GtE, Eq, NotEq, In, NotIn, Is, IsNot, Not, BoolOp, Or, And, Subscript, \ + Load, Slice, List, Tuple, Set, Dict, AST, NameConstant, \ + Name, GeneratorExp, Store, comprehension, ListComp, SetComp, DictComp, \ + Assign, AugAssign, BitXor, BitAnd, BitOr, LShift, RShift, Assert, Delete, \ + Del, Pass, Raise, Import, alias, ImportFrom, Continue, Break, Yield, \ + YieldFrom, Return, IfExp, Lambda, arguments, arg, Call, keyword, \ + Attribute, Global, Nonlocal, If, While, For, withitem, With, Try, \ + ExceptHandler, FunctionDef, ClassDef, Starred, NodeTransformer, \ + Interactive, Expression, dump +from ast import Ellipsis, Index # pylint:disable=unused-import,redefined-builtin + +from xonsh.tools import subproc_toks, VER_3_5, VER_MAJOR_MINOR + +if VER_3_5 <= VER_MAJOR_MINOR: from ast import MatMult, AsyncFunctionDef, AsyncWith, AsyncFor, Await else: MatMult = AsyncFunctionDef = AsyncWith = AsyncFor = Await = None -if PYTHON_VERSION_INFO >= (3, 6, 0): - # pylint: disable=unused-import - # pylint: disable=no-name-in-module - from ast import JoinedStr, FormattedValue, AnnAssign -else: - JoinedStr = FormattedValue = AnnAssign = None - -STATEMENTS = ( - FunctionDef, - ClassDef, - Return, - Delete, - Assign, - AugAssign, - For, - While, - If, - With, - Raise, - Try, - Assert, - Import, - ImportFrom, - Global, - Nonlocal, - Expr, - Pass, - Break, - Continue, -) -if PYTHON_VERSION_INFO >= (3, 6, 0): - STATEMENTS += (AnnAssign,) +STATEMENTS = (FunctionDef, ClassDef, Return, Delete, Assign, AugAssign, For, + While, If, With, Raise, Try, Assert, Import, ImportFrom, Global, + Nonlocal, Expr, Pass, Break, Continue) def leftmostname(node): @@ -162,167 +34,18 @@ def leftmostname(node): rtn = leftmostname(node.value) elif isinstance(node, Call): rtn = leftmostname(node.func) - elif isinstance(node, UnaryOp): - rtn = leftmostname(node.operand) - elif isinstance(node, BoolOp): - rtn = leftmostname(node.values[0]) - elif isinstance(node, (Assign, AnnAssign)): + elif isinstance(node, (BinOp, Compare)): + rtn = 
leftmostname(node.left) + elif isinstance(node, Assign): rtn = leftmostname(node.targets[0]) - elif isinstance(node, (Str, Bytes, JoinedStr)): + elif isinstance(node, (Str, Bytes)): # handles case of "./my executable" rtn = leftmostname(node.s) - elif isinstance(node, Tuple) and len(node.elts) > 0: - # handles case of echo ,1,2,3 - rtn = leftmostname(node.elts[0]) else: rtn = None return rtn -def get_lineno(node, default=0): - """Gets the lineno of a node or returns the default.""" - return getattr(node, "lineno", default) - - -def min_line(node): - """Computes the minimum lineno.""" - node_line = get_lineno(node) - return min(map(get_lineno, walk(node), itertools.repeat(node_line))) - - -def max_line(node): - """Computes the maximum lineno.""" - return max(map(get_lineno, walk(node))) - - -def get_col(node, default=-1): - """Gets the col_offset of a node, or returns the default""" - return getattr(node, "col_offset", default) - - -def min_col(node): - """Computes the minimum col_offset.""" - return min(map(get_col, walk(node), itertools.repeat(node.col_offset))) - - -def max_col(node): - """Returns the maximum col_offset of the node and all sub-nodes.""" - col = getattr(node, "max_col", None) - if col is not None: - return col - highest = max(walk(node), key=get_col) - col = highest.col_offset + node_len(highest) - return col - - -def node_len(node): - """The length of a node as a string""" - val = 0 - for n in walk(node): - if isinstance(n, Name): - val += len(n.id) - elif isinstance(n, Attribute): - val += 1 + (len(n.attr) if isinstance(n.attr, str) else 0) - # this may need to be added to for more nodes as more cases are found - return val - - -def get_id(node, default=None): - """Gets the id attribute of a node, or returns a default.""" - return getattr(node, "id", default) - - -def gather_names(node): - """Returns the set of all names present in the node's tree.""" - rtn = set(map(get_id, walk(node))) - rtn.discard(None) - return rtn - - -def get_id_ctx(node): - """Gets the id and attribute of a node, or returns a default.""" - nid = getattr(node, "id", None) - if nid is None: - return (None, None) - return (nid, node.ctx) - - -def gather_load_store_names(node): - """Returns the names present in the node's tree in a set of load nodes and - a set of store nodes. - """ - load = set() - store = set() - for nid, ctx in map(get_id_ctx, walk(node)): - if nid is None: - continue - elif isinstance(ctx, Load): - load.add(nid) - else: - store.add(nid) - return (load, store) - - -def has_elts(x): - """Tests if x is an AST node with elements.""" - return isinstance(x, AST) and hasattr(x, "elts") - - -def load_attribute_chain(name, lineno=None, col=None): - """Creates an AST that loads variable name that may (or may not) - have attribute chains. For example, "a.b.c" - """ - names = name.split(".") - node = Name(id=names.pop(0), ctx=Load(), lineno=lineno, col_offset=col) - for attr in names: - node = Attribute( - value=node, attr=attr, ctx=Load(), lineno=lineno, col_offset=col - ) - return node - - -def xonsh_call(name, args, lineno=None, col=None): - """Creates the AST node for calling a function of a given name. - Functions names may contain attribute access, e.g. __xonsh__.env. - """ - return Call( - func=load_attribute_chain(name, lineno=lineno, col=col), - args=args, - keywords=[], - starargs=None, - kwargs=None, - lineno=lineno, - col_offset=col, - ) - - -def isdescendable(node): - """Determines whether or not a node is worth visiting. Currently only - UnaryOp and BoolOp nodes are visited. 
- """ - return isinstance(node, (UnaryOp, BoolOp)) - - -def isexpression(node, ctx=None, *args, **kwargs): - """Determines whether a node (or code string) is an expression, and - does not contain any statements. The execution context (ctx) and - other args and kwargs are passed down to the parser, as needed. - """ - # parse string to AST - if isinstance(node, str): - node = node if node.endswith("\n") else node + "\n" - ctx = builtins.__xonsh__.ctx if ctx is None else ctx - node = builtins.__xonsh__.execer.parse(node, ctx, *args, **kwargs) - # determin if expresission-like enough - if isinstance(node, (Expr, Expression)): - isexpr = True - elif isinstance(node, Module) and len(node.body) == 1: - isexpr = isinstance(node.body[0], (Expr, Expression)) - else: - isexpr = False - return isexpr - - class CtxAwareTransformer(NodeTransformer): """Transforms a xonsh AST based to use subprocess calls when the first name in an expression statement is not known in the context. @@ -334,7 +57,7 @@ def __init__(self, parser): """Parameters ---------- parser : xonsh.Parser - A parse instance to try to parse subprocess statements with. + A parse instance to try to parse suprocess statements with. """ super(CtxAwareTransformer, self).__init__() self.parser = parser @@ -342,11 +65,8 @@ def __init__(self, parser): self.contexts = [] self.lines = None self.mode = None - self._nwith = 0 - self.filename = "" - self.debug_level = 0 - def ctxvisit(self, node, inp, ctx, mode="exec", filename=None, debug_level=0): + def ctxvisit(self, node, inp, ctx, mode='exec'): """Transforms the node in a context-dependent way. Parameters @@ -357,25 +77,17 @@ def ctxvisit(self, node, inp, ctx, mode="exec", filename=None, debug_level=0): The input code in string format. ctx : dict The root context to use. - filename : str, optional - File we are to transform. - debug_level : int, optional - Debugging level to use in lexing and parsing. Returns ------- node : ast.AST The transformed node. 
""" - self.filename = self.filename if filename is None else filename - self.debug_level = debug_level self.lines = inp.splitlines() self.contexts = [ctx, set()] self.mode = mode - self._nwith = 0 node = self.visit(node) del self.lines, self.contexts, self.mode - self._nwith = 0 return node def ctxupdate(self, iterable): @@ -393,88 +105,41 @@ def ctxremove(self, value): ctx.remove(value) break - def try_subproc_toks(self, node, strip_expr=False): + def try_subproc_toks(self, node): """Tries to parse the line of the node as a subprocess.""" - line, nlogical, idx = get_logical_line(self.lines, node.lineno - 1) - if self.mode == "eval": - mincol = len(line) - len(line.lstrip()) - maxcol = None - else: - mincol = max(min_col(node) - 1, 0) - maxcol = max_col(node) - if mincol == maxcol: - maxcol = find_next_break(line, mincol=mincol, lexer=self.parser.lexer) - elif nlogical > 1: - maxcol = None - elif maxcol < len(line) and line[maxcol] == ";": - pass - else: - maxcol += 1 - spline = subproc_toks( - line, - mincol=mincol, - maxcol=maxcol, - returnline=False, - lexer=self.parser.lexer, - ) - if spline is None or spline != "![{}]".format(line[mincol:maxcol].strip()): - # failed to get something consistent, try greedy wrap - spline = subproc_toks( - line, - mincol=mincol, - maxcol=maxcol, - returnline=False, - lexer=self.parser.lexer, - greedy=True, - ) - if spline is None: - return node + line = self.lines[node.lineno - 1] + mincol = len(line) - len(line.lstrip()) + maxcol = None if self.mode == 'eval' else node.col_offset + spline = subproc_toks(line, + mincol=mincol, + maxcol=maxcol, + returnline=False, + lexer=self.parser.lexer) try: - newnode = self.parser.parse( - spline, - mode=self.mode, - filename=self.filename, - debug_level=(self.debug_level > 2), - ) + newnode = self.parser.parse(spline, mode=self.mode) newnode = newnode.body if not isinstance(newnode, AST): # take the first (and only) Expr newnode = newnode[0] - increment_lineno(newnode, n=node.lineno - 1) + newnode.lineno = node.lineno newnode.col_offset = node.col_offset - if self.debug_level > 1: - msg = "{0}:{1}:{2}{3} - {4}\n" "{0}:{1}:{2}{3} + {5}" - mstr = "" if maxcol is None else ":" + str(maxcol) - msg = msg.format(self.filename, node.lineno, mincol, mstr, line, spline) - print(msg, file=sys.stderr) except SyntaxError: newnode = node - if strip_expr and isinstance(newnode, Expr): - newnode = newnode.value return newnode def is_in_scope(self, node): """Determines whether or not the current node is in scope.""" - names, store = gather_load_store_names(node) - names -= store - if not names: - return True + lname = leftmostname(node) + if lname is None: + return node inscope = False for ctx in reversed(self.contexts): - names -= ctx - if not names: + if lname in ctx: inscope = True break return inscope - # - # Replacement visitors - # - def visit_Expression(self, node): - """Handle visiting an expression body.""" - if isdescendable(node.body): - node.body = self.visit(node.body) body = node.body inscope = self.is_in_scope(body) if not inscope: @@ -482,53 +147,21 @@ def visit_Expression(self, node): return node def visit_Expr(self, node): - """Handle visiting an expression.""" - if isdescendable(node.value): - node.value = self.visit(node.value) # this allows diving into BoolOps - if self.is_in_scope(node) or isinstance(node.value, Lambda): + if self.is_in_scope(node): return node else: newnode = self.try_subproc_toks(node) if not isinstance(newnode, Expr): - newnode = Expr( - value=newnode, lineno=node.lineno, 
col_offset=node.col_offset - ) - if hasattr(node, "max_lineno"): - newnode.max_lineno = node.max_lineno - newnode.max_col = node.max_col + newnode = Expr(value=newnode, + lineno=node.lineno, + col_offset=node.col_offset) return newnode - def visit_UnaryOp(self, node): - """Handle visiting an unary operands, like not.""" - if isdescendable(node.operand): - node.operand = self.visit(node.operand) - operand = node.operand - inscope = self.is_in_scope(operand) - if not inscope: - node.operand = self.try_subproc_toks(operand, strip_expr=True) - return node - - def visit_BoolOp(self, node): - """Handle visiting an boolean operands, like and/or.""" - for i in range(len(node.values)): - val = node.values[i] - if isdescendable(val): - val = node.values[i] = self.visit(val) - inscope = self.is_in_scope(val) - if not inscope: - node.values[i] = self.try_subproc_toks(val, strip_expr=True) - return node - - # - # Context aggregator visitors - # - def visit_Assign(self, node): - """Handle visiting an assignment statement.""" ups = set() for targ in node.targets: if isinstance(targ, (Tuple, List)): - ups.update(leftmostname(elt) for elt in targ.elts) + ups.update(map(leftmostname, targ.elts)) elif isinstance(targ, BinOp): newnode = self.try_subproc_toks(node) if newnode is node: @@ -540,10 +173,7 @@ def visit_Assign(self, node): self.ctxupdate(ups) return node - visit_AnnAssign = visit_Assign - def visit_Import(self, node): - """Handle visiting a import statement.""" for name in node.names: if name.asname is None: self.ctxadd(name.name) @@ -552,7 +182,6 @@ def visit_Import(self, node): return node def visit_ImportFrom(self, node): - """Handle visiting a "from ... import ..." statement.""" for name in node.names: if name.asname is None: self.ctxadd(name.name) @@ -561,39 +190,29 @@ def visit_ImportFrom(self, node): return node def visit_With(self, node): - """Handle visiting a with statement.""" for item in node.items: if item.optional_vars is not None: - self.ctxupdate(gather_names(item.optional_vars)) - self._nwith += 1 + self.ctxadd(leftmostname(item.optional_vars)) self.generic_visit(node) - self._nwith -= 1 return node def visit_For(self, node): - """Handle visiting a for statement.""" targ = node.target - self.ctxupdate(gather_names(targ)) + if isinstance(targ, (Tuple, List)): + self.ctxupdate(map(leftmostname, targ.elts)) + else: + self.ctxadd(leftmostname(targ)) self.generic_visit(node) return node def visit_FunctionDef(self, node): - """Handle visiting a function definition.""" self.ctxadd(node.name) self.contexts.append(set()) - args = node.args - argchain = [args.args, args.kwonlyargs] - if args.vararg is not None: - argchain.append((args.vararg,)) - if args.kwarg is not None: - argchain.append((args.kwarg,)) - self.ctxupdate(a.arg for a in itertools.chain.from_iterable(argchain)) self.generic_visit(node) self.contexts.pop() return node def visit_ClassDef(self, node): - """Handle visiting a class definition.""" self.ctxadd(node.name) self.contexts.append(set()) self.generic_visit(node) @@ -601,7 +220,6 @@ def visit_ClassDef(self, node): return node def visit_Delete(self, node): - """Handle visiting a del statement.""" for targ in node.targets: if isinstance(targ, Name): self.ctxremove(targ.id) @@ -609,7 +227,6 @@ def visit_Delete(self, node): return node def visit_Try(self, node): - """Handle visiting a try statement.""" for handler in node.handlers: if handler.name is not None: self.ctxadd(handler.name) @@ -617,56 +234,6 @@ def visit_Try(self, node): return node def visit_Global(self, node): - 
"""Handle visiting a global statement.""" self.contexts[1].update(node.names) # contexts[1] is the global ctx self.generic_visit(node) return node - - -def pdump(s, **kwargs): - """performs a pretty dump of an AST node.""" - if isinstance(s, AST): - s = dump(s, **kwargs).replace(",", ",\n") - openers = "([{" - closers = ")]}" - lens = len(s) + 1 - if lens == 1: - return s - i = min([s.find(o) % lens for o in openers]) - if i == lens - 1: - return s - closer = closers[openers.find(s[i])] - j = s.rfind(closer) - if j == -1 or j <= i: - return s[: i + 1] + "\n" + textwrap.indent(pdump(s[i + 1 :]), " ") - pre = s[: i + 1] + "\n" - mid = s[i + 1 : j] - post = "\n" + s[j:] - mid = textwrap.indent(pdump(mid), " ") - if "(" in post or "[" in post or "{" in post: - post = pdump(post) - return pre + mid + post - - -def pprint_ast(s, *, sep=None, end=None, file=None, flush=False, **kwargs): - """Performs a pretty print of the AST nodes.""" - print(pdump(s, **kwargs), sep=sep, end=end, file=file, flush=flush) - - -# -# Private helpers -# - - -def _getblockattr(name, lineno, col): - """calls getattr(name, '__xonsh_block__', False).""" - return xonsh_call( - "getattr", - args=[ - Name(id=name, ctx=Load(), lineno=lineno, col_offset=col), - Str(s="__xonsh_block__", lineno=lineno, col_offset=col), - NameConstant(value=False, lineno=lineno, col_offset=col), - ], - lineno=lineno, - col=col, - ) diff --git a/xonsh/base_shell.py b/xonsh/base_shell.py index 91facc6..fbe8828 100644 --- a/xonsh/base_shell.py +++ b/xonsh/base_shell.py @@ -1,340 +1,113 @@ -# -*- coding: utf-8 -*- """The base class for xonsh shell""" import io import os +import shlex import sys import time import builtins +import traceback -from xonsh.tools import ( - XonshError, - print_exception, - DefaultNotGiven, - check_for_partial_string, - format_std_prepost, - get_line_continuation, -) -from xonsh.platform import HAS_PYGMENTS, ON_WINDOWS -from xonsh.codecache import ( - should_use_cache, - code_cache_name, - code_cache_check, - get_cache_filename, - update_cache, - run_compiled_code, -) +from xonsh.execer import Execer +from xonsh.tools import XonshError, escape_windows_title_string, ON_WINDOWS, \ + print_exception from xonsh.completer import Completer -from xonsh.prompt.base import multiline_prompt, PromptFormatter -from xonsh.events import events -from xonsh.shell import transform_command -from xonsh.lazyimps import pygments, pyghooks -from xonsh.ansi_colors import ansi_partial_color_format +from xonsh.environ import multiline_prompt, format_prompt -if ON_WINDOWS: - import ctypes - kernel32 = ctypes.windll.kernel32 - kernel32.SetConsoleTitleW.argtypes = [ctypes.c_wchar_p] - - -class _TeeStdBuf(io.RawIOBase): - """A dispatcher for bytes to two buffers, as std stream buffer and an - in memory buffer. +class TeeOut(object): + """Tees stdout into the original sys.stdout and another buffer instance that is + provided. """ - def __init__( - self, stdbuf, membuf, encoding=None, errors=None, prestd=b"", poststd=b"" - ): - """ - Parameters - ---------- - stdbuf : BytesIO-like or StringIO-like - The std stream buffer. - membuf : BytesIO-like - The in memory stream buffer. - encoding : str or None, optional - The encoding of the stream. Only used if stdbuf is a text stream, - rather than a binary one. Defaults to $XONSH_ENCODING if None. - errors : str or None, optional - The error form for the encoding of the stream. Only used if stdbuf - is a text stream, rather than a binary one. Deafults to - $XONSH_ENCODING_ERRORS if None. 
- prestd : bytes, optional - The prefix to prepend to the standard buffer. - poststd : bytes, optional - The postfix to append to the standard buffer. - """ - self.stdbuf = stdbuf - self.membuf = membuf - env = builtins.__xonsh__.env - self.encoding = env.get("XONSH_ENCODING") if encoding is None else encoding - self.errors = env.get("XONSH_ENCODING_ERRORS") if errors is None else errors - self.prestd = prestd - self.poststd = poststd - self._std_is_binary = not hasattr(stdbuf, "encoding") - - def fileno(self): - """Returns the file descriptor of the std buffer.""" - return self.stdbuf.fileno() - - def seek(self, offset, whence=io.SEEK_SET): - """Sets the location in both the stdbuf and the membuf.""" - self.stdbuf.seek(offset, whence) - self.membuf.seek(offset, whence) - - def truncate(self, size=None): - """Truncate both buffers.""" - self.stdbuf.truncate(size) - self.membuf.truncate(size) - - def readinto(self, b): - """Read bytes into buffer from both streams.""" - if self._std_is_binary: - self.stdbuf.readinto(b) - return self.membuf.readinto(b) - - def write(self, b): - """Write bytes into both buffers.""" - std_b = b - if self.prestd: - std_b = self.prestd + b - if self.poststd: - std_b += self.poststd - # write to stdbuf - if self._std_is_binary: - self.stdbuf.write(std_b) - else: - self.stdbuf.write(std_b.decode(encoding=self.encoding, errors=self.errors)) - return self.membuf.write(b) + def __init__(self, buf, *args, **kwargs): + self.buffer = buf + self.stdout = sys.stdout + sys.stdout = self + def __del__(self): + sys.stdout = self.stdout -class _TeeStd(io.TextIOBase): - """Tees a std stream into an in-memory container and the original stream.""" + def close(self): + """Restores the original stdout.""" + sys.stdout = self.stdout - def __init__(self, name, mem, prestd="", poststd=""): - """ - Parameters - ---------- - name : str - The name of the buffer in the sys module, e.g. 'stdout'. - mem : io.TextIOBase-like - The in-memory text-based representation. - prestd : str, optional - The prefix to prepend to the standard stream. - poststd : str, optional - The postfix to append to the standard stream. - """ - self._name = name - self.std = std = getattr(sys, name) - self.mem = mem - self.prestd = prestd - self.poststd = poststd - preb = prestd.encode(encoding=mem.encoding, errors=mem.errors) - postb = poststd.encode(encoding=mem.encoding, errors=mem.errors) - if hasattr(std, "buffer"): - buffer = _TeeStdBuf(std.buffer, mem.buffer, prestd=preb, poststd=postb) - else: - # TextIO does not have buffer as part of the API, so std streams - # may not either. - buffer = _TeeStdBuf( - std, - mem.buffer, - encoding=mem.encoding, - errors=mem.errors, - prestd=preb, - poststd=postb, - ) - self.buffer = buffer - setattr(sys, name, self) + def write(self, data): + """Writes data to the original stdout and the buffer.""" + self.stdout.write(data) + self.buffer.write(data) - @property - def encoding(self): - """The encoding of the in-memory buffer.""" - return self.mem.encoding + def flush(self): + """Flushes both the original stdout and the buffer.""" + self.stdout.flush() + self.buffer.flush() - @property - def errors(self): - """The errors of the in-memory buffer.""" - return self.mem.errors - @property - def newlines(self): - """The newlines of the in-memory buffer.""" - return self.mem.newlines +class TeeErr(object): + """Tees stderr into the original sys.stdout and another buffer instance that is + provided. 
+ """ - def _replace_std(self): - std = self.std - if std is None: - return - setattr(sys, self._name, std) - self.std = self._name = None + def __init__(self, buf, *args, **kwargs): + self.buffer = buf + self.stderr = sys.stderr + sys.stderr = self def __del__(self): - self._replace_std() + sys.stderr = self.stderr def close(self): - """Restores the original std stream.""" - self._replace_std() + """Restores the original stderr.""" + sys.stderr = self.stderr - def write(self, s): - """Writes data to the original std stream and the in-memory object.""" - self.mem.write(s) - if self.std is None: - return - std_s = s - if self.prestd: - std_s = self.prestd + std_s - if self.poststd: - std_s += self.poststd - self.std.write(std_s) + def write(self, data): + """Writes data to the original stderr and the buffer.""" + self.stderr.write(data) + self.buffer.write(data) def flush(self): - """Flushes both the original stdout and the buffer.""" - self.std.flush() - self.mem.flush() - - def fileno(self): - """Tunnel fileno() calls to the std stream.""" - return self.std.fileno() - - def seek(self, offset, whence=io.SEEK_SET): - """Seek to a location in both streams.""" - self.std.seek(offset, whence) - self.mem.seek(offset, whence) - - def truncate(self, size=None): - """Seek to a location in both streams.""" - self.std.truncate(size) - self.mem.truncate(size) - - def detach(self): - """This operation is not supported.""" - raise io.UnsupportedOperation - - def read(self, size=None): - """Read from the in-memory stream and seek to a new location in the - std stream. - """ - s = self.mem.read(size) - loc = self.std.tell() - self.std.seek(loc + len(s)) - return s - - def readline(self, size=-1): - """Read a line from the in-memory stream and seek to a new location - in the std stream. - """ - s = self.mem.readline(size) - loc = self.std.tell() - self.std.seek(loc + len(s)) - return s - + """Flushes both the original stderr and the buffer.""" + self.stderr.flush() + self.buffer.flush() -class Tee: - """Class that merges tee'd stdout and stderr into a single stream. +class Tee(io.StringIO): + """Class that merges tee'd stdout and stderr into a single buffer, namely itself. This represents what a user would actually see on the command line. - This class has the same interface as io.TextIOWrapper, except that - the buffer is optional. """ - # pylint is a stupid about counting public methods when using inheritance. 
- # pylint: disable=too-few-public-methods - - def __init__( - self, - buffer=None, - encoding=None, - errors=None, - newline=None, - line_buffering=False, - write_through=False, - ): - self.buffer = io.BytesIO() if buffer is None else buffer - self.memory = io.TextIOWrapper( - self.buffer, - encoding=encoding, - errors=errors, - newline=newline, - line_buffering=line_buffering, - write_through=write_through, - ) - self.stdout = _TeeStd("stdout", self.memory) - env = builtins.__xonsh__.env - prestderr = format_std_prepost(env.get("XONSH_STDERR_PREFIX")) - poststderr = format_std_prepost(env.get("XONSH_STDERR_POSTFIX")) - self.stderr = _TeeStd( - "stderr", self.memory, prestd=prestderr, poststd=poststderr - ) - - @property - def line_buffering(self): - return self.memory.line_buffering + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.stdout = TeeOut(self) + self.stderr = TeeErr(self) def __del__(self): del self.stdout, self.stderr - self.stdout = self.stderr = None + super().__del__() def close(self): """Closes the buffer as well as the stdout and stderr tees.""" self.stdout.close() self.stderr.close() - self.memory.close() - - def getvalue(self): - """Gets the current contents of the in-memory buffer.""" - m = self.memory - loc = m.tell() - m.seek(0) - s = m.read() - m.seek(loc) - return s + super().close() class BaseShell(object): """The xonsh shell.""" def __init__(self, execer, ctx, **kwargs): - super().__init__() + super().__init__(**kwargs) self.execer = execer self.ctx = ctx - self.completer = Completer() if kwargs.get("completer", True) else None + self.completer = Completer() self.buffer = [] self.need_more_lines = False self.mlprompt = None - self._styler = DefaultNotGiven - self.prompt_formatter = PromptFormatter() - self.accumulated_inputs = "" - - @property - def styler(self): - if self._styler is DefaultNotGiven: - if HAS_PYGMENTS: - from xonsh.pyghooks import XonshStyle - - env = builtins.__xonsh__.env - self._styler = XonshStyle(env.get("XONSH_COLOR_STYLE")) - else: - self._styler = None - return self._styler - - @styler.setter - def styler(self, value): - self._styler = value - - @styler.deleter - def styler(self): - self._styler = DefaultNotGiven + self.gitsome = None def emptyline(self): """Called when an empty line has been entered.""" self.need_more_lines = False - self.default("") - - def singleline(self, **kwargs): - """Reads a single line of input from the shell.""" - msg = "{0} has not implemented singleline()." 
- raise RuntimeError(msg.format(self.__class__.__name__)) + self.default('') def precmd(self, line): """Called just before execution of line.""" @@ -342,153 +115,60 @@ def precmd(self, line): def default(self, line): """Implements code execution.""" - line = line if line.endswith("\n") else line + "\n" + line = line if line.endswith('\n') else line + '\n' src, code = self.push(line) if code is None: return - - events.on_precommand.fire(cmd=src) - - env = builtins.__xonsh__.env - hist = builtins.__xonsh__.history # pylint: disable=no-member + hist = builtins.__xonsh_history__ ts1 = None - enc = env.get("XONSH_ENCODING") - err = env.get("XONSH_ENCODING_ERRORS") - tee = Tee(encoding=enc, errors=err) + tee = Tee() if builtins.__xonsh_env__.get('XONSH_STORE_STDOUT') \ + else io.StringIO() try: ts0 = time.time() - run_compiled_code(code, self.ctx, None, "single") + self.execer.exec(code, mode='single', glbs=self.ctx) # no locals ts1 = time.time() - if hist is not None and hist.last_cmd_rtn is None: + if hist.last_cmd_rtn is None: hist.last_cmd_rtn = 0 # returncode for success except XonshError as e: print(e.args[0], file=sys.stderr) - if hist is not None and hist.last_cmd_rtn is None: + if hist.last_cmd_rtn is None: hist.last_cmd_rtn = 1 # return code for failure - except Exception: # pylint: disable=broad-except + except Exception: print_exception() - if hist is not None and hist.last_cmd_rtn is None: + if hist.last_cmd_rtn is None: hist.last_cmd_rtn = 1 # return code for failure finally: ts1 = ts1 or time.time() - tee_out = tee.getvalue() - self._append_history(inp=src, ts=[ts0, ts1], tee_out=tee_out) - self.accumulated_inputs += src - if ( - tee_out - and env.get("XONSH_APPEND_NEWLINE") - and not tee_out.endswith(os.linesep) - ): - print(os.linesep, end="") + self._append_history(inp=src, ts=[ts0, ts1], tee_out=tee.getvalue()) tee.close() - self._fix_cwd() - if builtins.__xonsh__.exit: # pylint: disable=no-member + if builtins.__xonsh_exit__: return True - def _append_history(self, tee_out=None, **info): - """Append information about the command to the history. - - This also handles on_postcommand because this is the place where all the - information is available. - """ - hist = builtins.__xonsh__.history # pylint: disable=no-member - info["rtn"] = hist.last_cmd_rtn if hist is not None else None - tee_out = tee_out or None - last_out = hist.last_cmd_out if hist is not None else None - if last_out is None and tee_out is None: - pass - elif last_out is None and tee_out is not None: - info["out"] = tee_out - elif last_out is not None and tee_out is None: - info["out"] = last_out - else: - info["out"] = tee_out + "\n" + last_out - events.on_postcommand.fire( - cmd=info["inp"], rtn=info["rtn"], out=info.get("out", None), ts=info["ts"] - ) - if hist is not None: - hist.append(info) - hist.last_cmd_rtn = hist.last_cmd_out = None - - def _fix_cwd(self): - """Check if the cwd changed out from under us.""" - env = builtins.__xonsh__.env - try: - cwd = os.getcwd() - except (FileNotFoundError, OSError): - cwd = None - if cwd is None: - # directory has been deleted out from under us, most likely - pwd = env.get("PWD", None) - if pwd is None: - # we have no idea where we are - env["PWD"] = "" - elif os.path.isdir(pwd): - # unclear why os.getcwd() failed. do nothing. - pass - else: - # OK PWD is really gone. 
- msg = "{UNDERLINE_INTENSE_WHITE}{BACKGROUND_INTENSE_BLACK}" - msg += "xonsh: working directory does not exist: " + pwd - msg += "{NO_COLOR}" - self.print_color(msg, file=sys.stderr) - elif "PWD" not in env: - # $PWD is missing from env, recreate it - env["PWD"] = cwd - elif os.path.realpath(cwd) != os.path.realpath(env["PWD"]): - # The working directory has changed without updating $PWD, fix this - old = env["PWD"] - env["PWD"] = cwd - env["OLDPWD"] = old - events.on_chdir.fire(olddir=old, newdir=cwd) - def push(self, line): """Pushes a line onto the buffer and compiles the code in a way that enables multiline input. """ + code = None self.buffer.append(line) if self.need_more_lines: - return None, None - src = "".join(self.buffer) - src = transform_command(src) - return self.compile(src) - - def compile(self, src): - """Compiles source code and returns the (possibly modified) source and - a valid code object. - """ - _cache = should_use_cache(self.execer, "single") - if _cache: - codefname = code_cache_name(src) - cachefname = get_cache_filename(codefname, code=True) - usecache, code = code_cache_check(cachefname) - if usecache: - self.reset_buffer() - return src, code - lincont = get_line_continuation() - if src.endswith(lincont + "\n"): - self.need_more_lines = True - return src, None + return None, code + src = ''.join(self.buffer) try: - code = self.execer.compile(src, mode="single", glbs=self.ctx, locs=None) - if _cache: - update_cache(code, cachefname) + code = self.execer.compile(src, + mode='single', + glbs=None, + locs=self.ctx) self.reset_buffer() except SyntaxError: - partial_string_info = check_for_partial_string(src) - in_partial_string = ( - partial_string_info[0] is not None and partial_string_info[1] is None - ) - if (src == "\n" or src.endswith("\n\n")) and not in_partial_string: + if line == '\n': self.reset_buffer() print_exception() return src, None self.need_more_lines = True - code = None - except Exception: # pylint: disable=broad-except + except Exception: self.reset_buffer() print_exception() - code = None + return src, None return src, code def reset_buffer(self): @@ -499,28 +179,16 @@ def reset_buffer(self): def settitle(self): """Sets terminal title.""" - env = builtins.__xonsh__.env # pylint: disable=no-member - term = env.get("TERM", None) - # Shells running in emacs sets TERM to "dumb" or "eterm-color". - # Do not set title for these to avoid garbled prompt. 
- if (term is None and not ON_WINDOWS) or term in [ - "dumb", - "eterm-color", - "linux", - ]: + env = builtins.__xonsh_env__ + term = env.get('TERM', None) + if term is None or term == 'linux': return t = 'gitsome' - if t is None: - return - t = self.prompt_formatter(t) - if ON_WINDOWS and "ANSICON" not in env: - kernel32.SetConsoleTitleW(t) + if ON_WINDOWS and 'ANSICON' not in env: + t = escape_windows_title_string(t) + os.system('title {}'.format(t)) else: - with open(1, "wb", closefd=False) as f: - # prevent xonsh from answering interactive questions - # on the next command by writing the title - f.write("\x1b]0;{0}\x07".format(t).encode()) - f.flush() + sys.stdout.write("\x1b]2;{0}\x07".format(t)) @property def prompt(self): @@ -529,57 +197,33 @@ def prompt(self): if self.mlprompt is None: try: self.mlprompt = multiline_prompt() - except Exception: # pylint: disable=broad-except + except Exception: print_exception() - self.mlprompt = " " + self.mlprompt = ' ' return self.mlprompt - env = builtins.__xonsh__.env # pylint: disable=no-member - p = env.get("PROMPT") + env = builtins.__xonsh_env__ + p = env.get('PROMPT') try: - p = self.prompt_formatter(p) - except Exception: # pylint: disable=broad-except + p = format_prompt(p) + except Exception: print_exception() self.settitle() return p - def format_color(self, string, hide=False, force_string=False, **kwargs): - """Formats the colors in a string. ``BaseShell``'s default implementation - of this method uses colors based on ANSI color codes. - """ - style = builtins.__xonsh__.env.get("XONSH_COLOR_STYLE") - return ansi_partial_color_format(string, hide=hide, style=style) - - def print_color(self, string, hide=False, **kwargs): - """Prints a string in color. This base implementation's colors are based - on ANSI color codes if a string was given as input. If a list of token - pairs is given, it will color based on pygments, if available. If - pygments is not available, it will print a colorless string. - """ - if isinstance(string, str): - s = self.format_color(string, hide=hide) - elif HAS_PYGMENTS: - # assume this is a list of (Token, str) tuples and format it - env = builtins.__xonsh__.env - self.styler.style_name = env.get("XONSH_COLOR_STYLE") - style_proxy = pyghooks.xonsh_style_proxy(self.styler) - formatter = pyghooks.XonshTerminal256Formatter(style=style_proxy) - s = pygments.format(string, formatter).rstrip() + def _append_history(self, tee_out=None, **info): + hist = builtins.__xonsh_history__ + info['rtn'] = hist.last_cmd_rtn + tee_out = tee_out or None + last_out = hist.last_cmd_out or None + if last_out is None and tee_out is None: + pass + elif last_out is None and tee_out is not None: + info['out'] = tee_out + elif last_out is not None and tee_out is None: + info['out'] = last_out else: - # assume this is a list of (Token, str) tuples and remove color - s = "".join([x for _, x in string]) - print(s, **kwargs) - - def color_style_names(self): - """Returns an iterable of all available style names.""" - return () - - def color_style(self): - """Returns the current color map.""" - return {} - - def restore_tty_sanity(self): - """An interface for resetting the TTY stdin mode. This is highly - dependent on the shell backend. Also it is mostly optional since - it only affects ^Z backgrounding behaviour. 
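On non-Windows terminals, both versions of settitle() work by writing an xterm-style OSC (operating system command) escape sequence; only the code number and transport differ. For illustration, the escape on its own, assuming an xterm-compatible emulator:

    import sys

    def set_title(text):
        """Set the terminal title on xterm-compatible emulators."""
        # OSC 0 sets icon name and title together; OSC 2, used in the old
        # code above, sets the title alone. BEL (\x07) ends the sequence.
        sys.stdout.write('\x1b]0;{}\x07'.format(text))
        sys.stdout.flush()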
- """ - pass + info['out'] = tee_out + '\n' + last_out + hist.append(info) + hist.last_cmd_rtn = hist.last_cmd_out = None + + diff --git a/xonsh/built_ins.py b/xonsh/built_ins.py index 23369c8..36d5707 100644 --- a/xonsh/built_ins.py +++ b/xonsh/built_ins.py @@ -1,123 +1,224 @@ -# -*- coding: utf-8 -*- -"""The xonsh built-ins. - -Note that this module is named 'built_ins' so as not to be confused with the -special Python builtins module. +"""The xonsh built-ins. Note that this module is named 'built_ins' so as +not to be confused with the special Python builtins module. """ -import io import os import re import sys -import types +import time import shlex -import signal import atexit -import pathlib +import signal import inspect -import warnings import builtins -import itertools import subprocess -import contextlib -import collections.abc as cabc - -from xonsh.ast import AST -from xonsh.lazyasd import LazyObject, lazyobject +from io import TextIOWrapper, StringIO +from glob import glob, iglob +from subprocess import Popen, PIPE, STDOUT +from contextlib import contextmanager +from collections import Sequence, MutableMapping, Iterable, namedtuple, \ + MutableSequence, MutableSet + +from xonsh.tools import suggest_commands, XonshError, ON_POSIX, ON_WINDOWS, \ + string_types from xonsh.inspectors import Inspector -from xonsh.aliases import Aliases, make_default_aliases -from xonsh.environ import Env, default_env, locate_binary -from xonsh.jobs import add_job -from xonsh.platform import ON_POSIX, ON_WINDOWS, ON_WSL -from xonsh.proc import ( - PopenThread, - ProcProxyThread, - ProcProxy, - ConsoleParallelReader, - pause_call_resume, - CommandPipeline, - HiddenCommandPipeline, - STDOUT_CAPTURE_KINDS, -) -from xonsh.tools import ( - suggest_commands, - expand_path, - globpath, - XonshError, - XonshCalledProcessError, -) -from xonsh.lazyimps import pty, termios -from xonsh.commands_cache import CommandsCache -from xonsh.events import events - -import xonsh.completers.init - +from xonsh.environ import Env, default_env +from xonsh.aliases import DEFAULT_ALIASES +from xonsh.jobs import add_job, wait_for_active_job +from xonsh.proc import ProcProxy, SimpleProcProxy, TeePTYProc +from xonsh.history import History +from xonsh.foreign_shells import load_foreign_aliases + +ENV = None BUILTINS_LOADED = False -INSPECTOR = LazyObject(Inspector, globals(), "INSPECTOR") - -warnings.filterwarnings("once", category=DeprecationWarning) - - -@lazyobject -def AT_EXIT_SIGNALS(): - sigs = ( - signal.SIGABRT, - signal.SIGFPE, - signal.SIGILL, - signal.SIGSEGV, - signal.SIGTERM, - ) - if ON_POSIX: - sigs += (signal.SIGTSTP, signal.SIGQUIT, signal.SIGHUP) - return sigs +INSPECTOR = Inspector() +AT_EXIT_SIGNALS = (signal.SIGABRT, signal.SIGFPE, signal.SIGILL, signal.SIGSEGV, + signal.SIGTERM) +if ON_POSIX: + AT_EXIT_SIGNALS += (signal.SIGTSTP, signal.SIGQUIT, signal.SIGHUP) def resetting_signal_handle(sig, f): - """Sets a new signal handle that will automatically restore the old value + """Sets a new signal handle that will automaticallly restore the old value once the new handle is finished. 
""" oldh = signal.getsignal(sig) - def newh(s=None, frame=None): f(s, frame) signal.signal(sig, oldh) if sig != 0: sys.exit(sig) - signal.signal(sig, newh) -def helper(x, name=""): +class Aliases(MutableMapping): + """Represents a location to hold and look up aliases.""" + + def __init__(self, *args, **kwargs): + self._raw = {} + self.update(*args, **kwargs) + + def get(self, key, default=None): + """Returns the (possibly modified) value. If the key is not present, + then `default` is returned. + If the value is callable, it is returned without modification. If it + is an iterable of strings it will be evaluated recursively to expand + other aliases, resulting in a new list or a "partially applied" + callable. + """ + val = self._raw.get(key) + if val is None: + return default + elif isinstance(val, Iterable) or callable(val): + return self.eval_alias(val, seen_tokens={key}) + else: + msg = 'alias of {!r} has an inappropriate type: {!r}' + raise TypeError(msg.format(key, val)) + + def eval_alias(self, value, seen_tokens, acc_args=[]): + """ + "Evaluates" the alias `value`, by recursively looking up the leftmost + token and "expanding" if it's also an alias. + + A value like ["cmd", "arg"] might transform like this: + > ["cmd", "arg"] -> ["ls", "-al", "arg"] -> callable() + where `cmd=ls -al` and `ls` is an alias with its value being a + callable. The resulting callable will be "partially applied" with + ["-al", "arg"]. + """ + # Beware of mutability: default values for keyword args are evaluated + # only once. + if callable(value): + if acc_args: # Partial application + return lambda args, stdin=None: value(acc_args + args, + stdin=stdin) + else: + return value + else: + token, *rest = value + if token in seen_tokens or token not in self._raw: + # ^ Making sure things like `egrep=egrep --color=auto` works, + # and that `l` evals to `ls --color=auto -CF` if `l=ls -CF` + # and `ls=ls --color=auto` + return value + acc_args + else: + return self.eval_alias(self._raw[token], + seen_tokens | {token}, + rest + acc_args) + + # + # Mutable mapping interface + # + + def __getitem__(self, key): + return self._raw[key] + + def __setitem__(self, key, val): + if isinstance(val, string_types): + self._raw[key] = shlex.split(val) + else: + self._raw[key] = val + + def __delitem__(self, key): + del self._raw[key] + + def update(self, *args, **kwargs): + for key, val in dict(*args, **kwargs).items(): + self[key] = val + + def __iter__(self): + yield from self._raw + + def __len__(self): + return len(self._raw) + + def __str__(self): + return str(self._raw) + + def __repr__(self): + return '{0}.{1}({2})'.format(self.__class__.__module__, + self.__class__.__name__, self._raw) + + def _repr_pretty_(self, p, cycle): + name = '{0}.{1}'.format(self.__class__.__module__, + self.__class__.__name__) + with p.group(0, name + '(', ')'): + if cycle: + p.text('...') + elif len(self): + p.break_() + p.pretty(dict(self)) + + +def helper(x, name=''): """Prints help about, and then returns that variable.""" INSPECTOR.pinfo(x, oname=name, detail_level=0) return x -def superhelper(x, name=""): +def superhelper(x, name=''): """Prints help about, and then returns that variable.""" INSPECTOR.pinfo(x, oname=name, detail_level=1) return x +def expand_path(s): + """Takes a string path and expands ~ to home and environment vars.""" + global ENV + if ENV is not None: + ENV.replace_env() + return os.path.expanduser(os.path.expandvars(s)) + + +def expand_case_matching(s): + """Expands a string to a case insenstive globable string.""" 
+ t = [] + openers = {'[', '{'} + closers = {']', '}'} + nesting = 0 + for c in s: + if c in openers: + nesting += 1 + elif c in closers: + nesting -= 1 + elif nesting > 0: + pass + elif c.isalpha(): + folded = c.casefold() + if len(folded) == 1: + c = '[{0}{1}]'.format(c.upper(), c.lower()) + else: + newc = ['[{0}{1}]?'.format(f.upper(), f.lower()) + for f in folded[:-1]] + newc = ''.join(newc) + newc += '[{0}{1}{2}]'.format(folded[-1].upper(), + folded[-1].lower(), + c) + c = newc + t.append(c) + t = ''.join(t) + return t + + def reglob(path, parts=None, i=None): """Regular expression-based globbing.""" if parts is None: path = os.path.normpath(path) drive, tail = os.path.splitdrive(path) parts = tail.split(os.sep) - d = os.sep if os.path.isabs(path) else "." + d = os.sep if os.path.isabs(path) else '.' d = os.path.join(drive, d) return reglob(d, parts, i=0) base = subdir = path if i == 0: if not os.path.isabs(base): - base = "" + base = '' elif len(parts) > 1: i += 1 regex = os.path.join(base, parts[i]) if ON_WINDOWS: # currently unable to access regex backslash sequences # on Windows due to paths using \. - regex = regex.replace("\\", "\\\\") + regex = regex.replace('\\', '\\\\') regex = re.compile(regex) files = os.listdir(subdir) files.sort() @@ -126,777 +227,239 @@ def reglob(path, parts=None, i=None): if i1 == len(parts): for f in files: p = os.path.join(base, f) - if regex.fullmatch(p) is not None: + if regex.match(p) is not None: paths.append(p) else: for f in files: p = os.path.join(base, f) - if regex.fullmatch(p) is None or not os.path.isdir(p): + if regex.match(p) is None or not os.path.isdir(p): continue paths += reglob(p, parts=parts, i=i1) return paths -def path_literal(s): +def regexpath(s): + """Takes a regular expression string and returns a list of file + paths that match the regex. + """ s = expand_path(s) - return pathlib.Path(s) + return reglob(s) -def regexsearch(s): +def globpath(s, ignore_case=False): + """Simple wrapper around glob that also expands home and env vars.""" s = expand_path(s) - return reglob(s) + if ignore_case: + s = expand_case_matching(s) + o = glob(s) + return o if len(o) != 0 else [s] -def globsearch(s): - csc = builtins.__xonsh__.env.get("CASE_SENSITIVE_COMPLETIONS") - glob_sorted = builtins.__xonsh__.env.get("GLOB_SORTED") - dotglob = builtins.__xonsh__.env.get("DOTGLOB") - return globpath( - s, - ignore_case=(not csc), - return_empty=True, - sort_result=glob_sorted, - include_dotfiles=dotglob, - ) +def iglobpath(s, ignore_case=False): + """Simple wrapper around iglob that also expands home and env vars.""" + s = expand_path(s) + if ignore_case: + s = expand_case_matching(s) + return iglob(s) -def pathsearch(func, s, pymode=False, pathobj=False): - """ - Takes a string and returns a list of file paths that match (regex, glob, - or arbitrary search function). If pathobj=True, the return is a list of - pathlib.Path objects instead of strings. 
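expand_case_matching() rewrites every letter into a two-character bracket class, so case-insensitive matching can be delegated to an ordinary case-sensitive glob. A quick sketch of what the transform yields (output inferred from the loop above):

    from glob import glob

    expand_case_matching('readme.md')
    # -> '[Rr][Ee][Aa][Dd][Mm][Ee].[Mm][Dd]'

    # a case-insensitive file lookup is then just an ordinary glob:
    matches = glob(expand_case_matching('readme.md'))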
- """ - if not callable(func) or len(inspect.signature(func).parameters) != 1: - error = "%r is not a known path search function" - raise XonshError(error % func) - o = func(s) - if pathobj and pymode: - o = list(map(pathlib.Path, o)) - no_match = [] if pymode else [s] - return o if len(o) != 0 else no_match +RE_SHEBANG = re.compile(r'#![ \t]*(.+?)$') -RE_SHEBANG = LazyObject(lambda: re.compile(r"#![ \t]*(.+?)$"), globals(), "RE_SHEBANG") +def _get_runnable_name(fname): + if os.path.isfile(fname) and fname != os.path.basename(fname): + return fname + for d in builtins.__xonsh_env__.get('PATH'): + if os.path.isdir(d): + files = os.listdir(d) + if ON_WINDOWS: + PATHEXT = builtins.__xonsh_env__.get('PATHEXT') + for dirfile in files: + froot, ext = os.path.splitext(dirfile) + if fname == froot and ext.upper() in PATHEXT: + return os.path.join(d, dirfile) + if fname in files: + return os.path.join(d, fname) + return None def _is_binary(fname, limit=80): - with open(fname, "rb") as f: + with open(fname, 'rb') as f: for i in range(limit): char = f.read(1) - if char == b"\0": + if char == b'\0': return True - if char == b"\n": + if char == b'\n': return False - if char == b"": + if char == b'': return False return False def _un_shebang(x): - if x == "/usr/bin/env": + if x == '/usr/bin/env': return [] - elif any(x.startswith(i) for i in ["/usr/bin", "/usr/local/bin", "/bin"]): + elif any(x.startswith(i) for i in ['/usr/bin', '/usr/local/bin', '/bin']): x = os.path.basename(x) - elif x.endswith("python") or x.endswith("python.exe"): - x = "python" - if x == "xonsh": - return ["python", "-m", "xonsh.main"] + elif x.endswith('python') or x.endswith('python.exe'): + x = 'python' + if x == 'xonsh': + return ['python', '-m', 'xonsh.main'] return [x] def get_script_subproc_command(fname, args): - """Given the name of a script outside the path, returns a list representing + """ + Given the name of a script outside the path, returns a list representing an appropriate subprocess command to execute the script. Raises PermissionError if the script is not executable. """ # make sure file is executable if not os.access(fname, os.X_OK): raise PermissionError + if ON_POSIX and not os.access(fname, os.R_OK): - # on some systems, some important programs (e.g. sudo) will have - # execute permissions but not read/write permissions. This enables - # things with the SUID set to be run. Needs to come before _is_binary() - # is called, because that function tries to read the file. + # on some systems, some importnat programs (e.g. sudo) will have execute + # permissions but not read/write permisions. This enables things with the SUID + # set to be run. Needs to come before _is_binary() is called, because that + # function tries to read the file. 
return [fname] + args elif _is_binary(fname): # if the file is a binary, we should call it directly return [fname] + args + if ON_WINDOWS: # Windows can execute various filetypes directly # as given in PATHEXT _, ext = os.path.splitext(fname) - if ext.upper() in builtins.__xonsh__.env.get("PATHEXT"): + if ext.upper() in builtins.__xonsh_env__.get('PATHEXT', []): return [fname] + args + # find interpreter - with open(fname, "rb") as f: + with open(fname, 'rb') as f: first_line = f.readline().decode().strip() m = RE_SHEBANG.match(first_line) + # xonsh is the default interpreter if m is None: - interp = ["xonsh"] + interp = ['xonsh'] else: interp = m.group(1).strip() if len(interp) > 0: interp = shlex.split(interp) else: - interp = ["xonsh"] + interp = ['xonsh'] + if ON_WINDOWS: o = [] for i in interp: o.extend(_un_shebang(i)) interp = o + return interp + [fname] + args -@lazyobject -def _REDIR_REGEX(): - name = r"(o(?:ut)?|e(?:rr)?|a(?:ll)?|&?\d?)" - return re.compile("{r}(>?>|<){r}$".format(r=name)) - - -_MODES = LazyObject(lambda: {">>": "a", ">": "w", "<": "r"}, globals(), "_MODES") -_WRITE_MODES = LazyObject(lambda: frozenset({"w", "a"}), globals(), "_WRITE_MODES") -_REDIR_ALL = LazyObject(lambda: frozenset({"&", "a", "all"}), globals(), "_REDIR_ALL") -_REDIR_ERR = LazyObject(lambda: frozenset({"2", "e", "err"}), globals(), "_REDIR_ERR") -_REDIR_OUT = LazyObject( - lambda: frozenset({"", "1", "o", "out"}), globals(), "_REDIR_OUT" -) -_E2O_MAP = LazyObject( - lambda: frozenset( - {"{}>{}".format(e, o) for e in _REDIR_ERR for o in _REDIR_OUT if o != ""} - ), - globals(), - "_E2O_MAP", -) -_O2E_MAP = LazyObject( - lambda: frozenset( - {"{}>{}".format(o, e) for e in _REDIR_ERR for o in _REDIR_OUT if o != ""} - ), - globals(), - "_O2E_MAP", -) +def _subproc_pre(): + os.setpgrp() + signal.signal(signal.SIGTSTP, lambda n, f: signal.pause()) + + +_REDIR_NAME = "(o(?:ut)?|e(?:rr)?|a(?:ll)?|&?\d?)" +_REDIR_REGEX = re.compile("{r}(>?>|<){r}$".format(r=_REDIR_NAME)) +_MODES = {'>>': 'a', '>': 'w', '<': 'r'} +_WRITE_MODES = frozenset({'w', 'a'}) +_REDIR_ALL = frozenset({'&', 'a', 'all'}) +_REDIR_ERR = frozenset({'2', 'e', 'err'}) +_REDIR_OUT = frozenset({'', '1', 'o', 'out'}) +_E2O_MAP = frozenset({'{}>{}'.format(e, o) + for e in _REDIR_ERR + for o in _REDIR_OUT + if o != ''}) def _is_redirect(x): return isinstance(x, str) and _REDIR_REGEX.match(x) -def safe_open(fname, mode, buffering=-1): - """Safely attempts to open a file in for xonsh subprocs.""" +def _open(fname, mode): # file descriptors + if isinstance(fname, int): + return fname try: - return io.open(fname, mode, buffering=buffering) - except PermissionError: - raise XonshError("xonsh: {0}: permission denied".format(fname)) - except FileNotFoundError: - raise XonshError("xonsh: {0}: no such file or directory".format(fname)) + return open(fname, mode) except Exception: - raise XonshError("xonsh: {0}: unable to open file".format(fname)) + raise XonshError('xonsh: {0}: no such file or directory'.format(fname)) -def safe_close(x): - """Safely attempts to close an object.""" - if not isinstance(x, io.IOBase): - return - if x.closed: +def _redirect_io(streams, r, loc=None): + # special case of redirecting stderr to stdout + if r.replace('&', '') in _E2O_MAP: + if 'stderr' in streams: + raise XonshError('Multiple redirects for stderr') + streams['stderr'] = STDOUT return - try: - x.close() - except Exception: - pass - -def _parse_redirects(r, loc=None): - """returns origin, mode, destination tuple""" orig, mode, dest = 
_REDIR_REGEX.match(r).groups() + # redirect to fd - if dest.startswith("&"): + if dest.startswith('&'): try: dest = int(dest[1:]) if loc is None: - loc, dest = dest, "" # NOQA + loc, dest = dest, '' else: - e = "Unrecognized redirection command: {}".format(r) + e = 'Unrecognized redirection command: {}'.format(r) raise XonshError(e) except (ValueError, XonshError): raise except Exception: pass - mode = _MODES.get(mode, None) - if mode == "r" and (len(orig) > 0 or len(dest) > 0): - raise XonshError("Unrecognized redirection command: {}".format(r)) - elif mode in _WRITE_MODES and len(dest) > 0: - raise XonshError("Unrecognized redirection command: {}".format(r)) - return orig, mode, dest + mode = _MODES.get(mode, None) -def _redirect_streams(r, loc=None): - """Returns stdin, stdout, stderr tuple of redirections.""" - stdin = stdout = stderr = None - no_ampersand = r.replace("&", "") - # special case of redirecting stderr to stdout - if no_ampersand in _E2O_MAP: - stderr = subprocess.STDOUT - return stdin, stdout, stderr - elif no_ampersand in _O2E_MAP: - stdout = 2 # using 2 as a flag, rather than using a file object - return stdin, stdout, stderr - # get streams - orig, mode, dest = _parse_redirects(r) - if mode == "r": - stdin = safe_open(loc, mode) + if mode == 'r': + if len(orig) > 0 or len(dest) > 0: + raise XonshError('Unrecognized redirection command: {}'.format(r)) + elif 'stdin' in streams: + raise XonshError('Multiple inputs for stdin') + else: + streams['stdin'] = _open(loc, mode) elif mode in _WRITE_MODES: if orig in _REDIR_ALL: - stdout = stderr = safe_open(loc, mode) - elif orig in _REDIR_OUT: - stdout = safe_open(loc, mode) + if 'stderr' in streams: + raise XonshError('Multiple redirects for stderr') + elif 'stdout' in streams: + raise XonshError('Multiple redirects for stdout') + elif len(dest) > 0: + e = 'Unrecognized redirection command: {}'.format(r) + raise XonshError(e) + targets = ['stdout', 'stderr'] elif orig in _REDIR_ERR: - stderr = safe_open(loc, mode) - else: - raise XonshError("Unrecognized redirection command: {}".format(r)) - else: - raise XonshError("Unrecognized redirection command: {}".format(r)) - return stdin, stdout, stderr - - -def default_signal_pauser(n, f): - """Pauses a signal, as needed.""" - signal.pause() - - -def no_pg_xonsh_preexec_fn(): - """Default subprocess preexec function for when there is no existing - pipeline group. - """ - os.setpgrp() - signal.signal(signal.SIGTSTP, default_signal_pauser) - - -class SubprocSpec: - """A container for specifying how a subprocess command should be - executed. - """ - - kwnames = ("stdin", "stdout", "stderr", "universal_newlines", "close_fds") - - def __init__( - self, - cmd, - cls=subprocess.Popen, - stdin=None, - stdout=None, - stderr=None, - universal_newlines=False, - close_fds=False, - captured=False, - ): - """ - Parameters - ---------- - cmd : list of str - Command to be run. - cls : Popen-like - Class to run the subprocess with. - stdin : file-like - Popen file descriptor or flag for stdin. - stdout : file-like - Popen file descriptor or flag for stdout. - stderr : file-like - Popen file descriptor or flag for stderr. - universal_newlines : bool - Whether or not to use universal newlines. - close_fds : bool - Whether or not to close the file descriptiors when the - process exits. - captured : bool or str, optional - The flag for if the subprocess is captured, may be one of: - False for $[], 'stdout' for $(), 'hiddenobject' for ![], or - 'object' for !(). 
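The redirection grammar encoded by _REDIR_REGEX is easiest to see by matching a few tokens by hand; all of the spellings below are accepted by the code above:

    import re

    name = r"(o(?:ut)?|e(?:rr)?|a(?:ll)?|&?\d?)"
    redir = re.compile('{r}(>?>|<){r}$'.format(r=name))

    redir.match('2>1').groups()      # ('2', '>', '1')     stderr into stdout
    redir.match('err>out').groups()  # ('err', '>', 'out') same, spelled out
    redir.match('>>').groups()       # ('', '>>', '')      append stdout
    redir.match('a>').groups()       # ('a', '>', '')      both streams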
- - Attributes - ---------- - args : list of str - Arguments as originally supplied. - alias : list of str, callable, or None - The alias that was resolved for this command, if any. - binary_loc : str or None - Path to binary to execute. - is_proxy : bool - Whether or not the subprocess is or should be run as a proxy. - background : bool - Whether or not the subprocess should be started in the background. - threadable : bool - Whether or not the subprocess is able to be run in a background - thread, rather than the main thread. - pipeline_index : int or None - The index number of this sepc into the pipeline that is being setup. - last_in_pipeline : bool - Whether the subprocess is the last in the execution pipeline. - captured_stdout : file-like - Handle to captured stdin - captured_stderr : file-like - Handle to captured stderr - stack : list of FrameInfo namedtuples or None - The stack of the call-site of alias, if the alias requires it. - None otherwise. - """ - self._stdin = self._stdout = self._stderr = None - # args - self.cmd = list(cmd) - self.cls = cls - self.stdin = stdin - self.stdout = stdout - self.stderr = stderr - self.universal_newlines = universal_newlines - self.close_fds = close_fds - self.captured = captured - # pure attrs - self.args = list(cmd) - self.alias = None - self.binary_loc = None - self.is_proxy = False - self.background = False - self.threadable = True - self.pipeline_index = None - self.last_in_pipeline = False - self.captured_stdout = None - self.captured_stderr = None - self.stack = None - - def __str__(self): - s = self.__class__.__name__ + "(" + str(self.cmd) + ", " - s += self.cls.__name__ + ", " - kws = [n + "=" + str(getattr(self, n)) for n in self.kwnames] - s += ", ".join(kws) + ")" - return s - - def __repr__(self): - s = self.__class__.__name__ + "(" + repr(self.cmd) + ", " - s += self.cls.__name__ + ", " - kws = [n + "=" + repr(getattr(self, n)) for n in self.kwnames] - s += ", ".join(kws) + ")" - return s - - # - # Properties - # - - @property - def stdin(self): - return self._stdin - - @stdin.setter - def stdin(self, value): - if self._stdin is None: - self._stdin = value - elif value is None: - pass - else: - safe_close(value) - msg = "Multiple inputs for stdin for {0!r}" - msg = msg.format(" ".join(self.args)) - raise XonshError(msg) - - @property - def stdout(self): - return self._stdout - - @stdout.setter - def stdout(self, value): - if self._stdout is None: - self._stdout = value - elif value is None: - pass - else: - safe_close(value) - msg = "Multiple redirections for stdout for {0!r}" - msg = msg.format(" ".join(self.args)) - raise XonshError(msg) - - @property - def stderr(self): - return self._stderr - - @stderr.setter - def stderr(self, value): - if self._stderr is None: - self._stderr = value - elif value is None: - pass - else: - safe_close(value) - msg = "Multiple redirections for stderr for {0!r}" - msg = msg.format(" ".join(self.args)) - raise XonshError(msg) - - # - # Execution methods - # - - def run(self, *, pipeline_group=None): - """Launches the subprocess and returns the object.""" - event_name = self._cmd_event_name() - self._pre_run_event_fire(event_name) - kwargs = {n: getattr(self, n) for n in self.kwnames} - self.prep_env(kwargs) - self.prep_preexec_fn(kwargs, pipeline_group=pipeline_group) - if callable(self.alias): - if "preexec_fn" in kwargs: - kwargs.pop("preexec_fn") - p = self.cls(self.alias, self.cmd, **kwargs) - else: - self._fix_null_cmd_bytes() - p = self._run_binary(kwargs) - p.spec = self - 
p.last_in_pipeline = self.last_in_pipeline - p.captured_stdout = self.captured_stdout - p.captured_stderr = self.captured_stderr - self._post_run_event_fire(event_name, p) - return p - - def _run_binary(self, kwargs): - try: - bufsize = 1 - p = self.cls(self.cmd, bufsize=bufsize, **kwargs) - except PermissionError: - e = "xonsh: subprocess mode: permission denied: {0}" - raise XonshError(e.format(self.cmd[0])) - except FileNotFoundError: - cmd0 = self.cmd[0] - e = "xonsh: subprocess mode: command not found: {0}".format(cmd0) - env = builtins.__xonsh__.env - sug = suggest_commands(cmd0, env, builtins.aliases) - if len(sug.strip()) > 0: - e += "\n" + suggest_commands(cmd0, env, builtins.aliases) - raise XonshError(e) - return p - - def prep_env(self, kwargs): - """Prepares the environment to use in the subprocess.""" - denv = builtins.__xonsh__.env.detype() - if ON_WINDOWS: - # Over write prompt variable as xonsh's $PROMPT does - # not make much sense for other subprocs - denv["PROMPT"] = "$P$G" - kwargs["env"] = denv - - def prep_preexec_fn(self, kwargs, pipeline_group=None): - """Prepares the 'preexec_fn' keyword argument""" - if not ON_POSIX: - return - if not builtins.__xonsh__.env.get("XONSH_INTERACTIVE"): - return - if pipeline_group is None or ON_WSL: - # If there is no pipeline group - # or the platform is windows subsystem for linux (WSL) - xonsh_preexec_fn = no_pg_xonsh_preexec_fn - else: - - def xonsh_preexec_fn(): - """Preexec function bound to a pipeline group.""" - os.setpgid(0, pipeline_group) - signal.signal(signal.SIGTSTP, default_signal_pauser) - - kwargs["preexec_fn"] = xonsh_preexec_fn - - def _fix_null_cmd_bytes(self): - # Popen does not accept null bytes in its input commands. - # That doesn't stop some subprocesses from using them. Here we - # escape them just in case. - cmd = self.cmd - for i in range(len(cmd)): - cmd[i] = cmd[i].replace("\0", "\\0") - - def _cmd_event_name(self): - if callable(self.alias): - return self.alias.__name__ - elif self.binary_loc is None: - return "" + if 'stderr' in streams: + raise XonshError('Multiple redirects for stderr') + elif len(dest) > 0: + e = 'Unrecognized redirection command: {}'.format(r) + raise XonshError(e) + targets = ['stderr'] + elif orig in _REDIR_OUT: + if 'stdout' in streams: + raise XonshError('Multiple redirects for stdout') + elif len(dest) > 0: + e = 'Unrecognized redirection command: {}'.format(r) + raise XonshError(e) + targets = ['stdout'] else: - return os.path.basename(self.binary_loc) + raise XonshError('Unrecognized redirection command: {}'.format(r)) - def _pre_run_event_fire(self, name): - event_name = "on_pre_spec_run_" + name - if events.exists(event_name): - event = getattr(events, event_name) - event.fire(spec=self) + f = _open(loc, mode) + for t in targets: + streams[t] = f - def _post_run_event_fire(self, name, proc): - event_name = "on_post_spec_run_" + name - if events.exists(event_name): - event = getattr(events, event_name) - event.fire(spec=self, proc=proc) - - # - # Building methods - # - - @classmethod - def build(kls, cmd, *, cls=subprocess.Popen, **kwargs): - """Creates an instance of the subprocess command, with any - modifications and adjustments based on the actual cmd that - was received. 
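prep_preexec_fn() above, like the older _subproc_pre(), moves the child into its own process group between fork and exec so that job-control signals such as ^C and ^Z reach only the foreground job, not the shell. The core of the trick as a standalone POSIX-only sketch:

    import os
    import signal
    import subprocess

    def own_process_group():
        """Runs in the child after fork, just before exec."""
        os.setpgrp()  # detach from the shell's process group
        signal.signal(signal.SIGTSTP, lambda n, f: signal.pause())

    proc = subprocess.Popen(['sleep', '5'], preexec_fn=own_process_group)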
- """ - # modifications that do not alter cmds may come before creating instance - spec = kls(cmd, cls=cls, **kwargs) - # modifications that alter cmds must come after creating instance - # perform initial redirects - spec.redirect_leading() - spec.redirect_trailing() - # apply aliases - spec.resolve_alias() - spec.resolve_binary_loc() - spec.resolve_auto_cd() - spec.resolve_executable_commands() - spec.resolve_alias_cls() - spec.resolve_stack() - return spec - - def redirect_leading(self): - """Manage leading redirects such as with '< input.txt COMMAND'. """ - while len(self.cmd) >= 3 and self.cmd[0] == "<": - self.stdin = safe_open(self.cmd[1], "r") - self.cmd = self.cmd[2:] - - def redirect_trailing(self): - """Manages trailing redirects.""" - while True: - cmd = self.cmd - if len(cmd) >= 3 and _is_redirect(cmd[-2]): - streams = _redirect_streams(cmd[-2], cmd[-1]) - self.stdin, self.stdout, self.stderr = streams - self.cmd = cmd[:-2] - elif len(cmd) >= 2 and _is_redirect(cmd[-1]): - streams = _redirect_streams(cmd[-1]) - self.stdin, self.stdout, self.stderr = streams - self.cmd = cmd[:-1] - else: - break - - def resolve_alias(self): - """Sets alias in command, if applicable.""" - cmd0 = self.cmd[0] - if callable(cmd0): - alias = cmd0 - else: - alias = builtins.aliases.get(cmd0, None) - self.alias = alias - - def resolve_binary_loc(self): - """Sets the binary location""" - alias = self.alias - if alias is None: - binary_loc = locate_binary(self.cmd[0]) - elif callable(alias): - binary_loc = None - else: - binary_loc = locate_binary(alias[0]) - self.binary_loc = binary_loc - - def resolve_auto_cd(self): - """Implements AUTO_CD functionality.""" - if not ( - self.alias is None - and self.binary_loc is None - and len(self.cmd) == 1 - and builtins.__xonsh__.env.get("AUTO_CD") - and os.path.isdir(self.cmd[0]) - ): - return - self.cmd.insert(0, "cd") - self.alias = builtins.aliases.get("cd", None) - - def resolve_executable_commands(self): - """Resolve command executables, if applicable.""" - alias = self.alias - if alias is None: - pass - elif callable(alias): - self.cmd.pop(0) - return - else: - self.cmd = alias + self.cmd[1:] - # resolve any redirects the aliases may have applied - self.redirect_leading() - self.redirect_trailing() - if self.binary_loc is None: - return - try: - self.cmd = get_script_subproc_command(self.binary_loc, self.cmd[1:]) - except PermissionError: - e = "xonsh: subprocess mode: permission denied: {0}" - raise XonshError(e.format(self.cmd[0])) - - def resolve_alias_cls(self): - """Determine which proxy class to run an alias with.""" - alias = self.alias - if not callable(alias): - return - self.is_proxy = True - thable = getattr(alias, "__xonsh_threadable__", True) - cls = ProcProxyThread if thable else ProcProxy - self.cls = cls - self.threadable = thable - # also check capturability, while we are here - cpable = getattr(alias, "__xonsh_capturable__", self.captured) - self.captured = cpable - - def resolve_stack(self): - """Computes the stack for a callable alias's call-site, if needed.""" - if not callable(self.alias): - return - # check that we actual need the stack - sig = inspect.signature(self.alias) - if len(sig.parameters) <= 5 and "stack" not in sig.parameters: - return - # compute the stack, and filter out these build methods - # run_subproc() is the 4th command in the stack - # we want to filter out one up, e.g. subproc_captured_hiddenobject() - # after that the stack from the call site starts. 
- stack = inspect.stack(context=0) - assert stack[3][3] == "run_subproc", "xonsh stack has changed!" - del stack[:5] - self.stack = stack - - -def _safe_pipe_properties(fd, use_tty=False): - """Makes sure that a pipe file descriptor properties are sane.""" - if not use_tty: - return - # due to some weird, long standing issue in Python, PTYs come out - # replacing newline \n with \r\n. This causes issues for raw unix - # protocols, like git and ssh, which expect unix line endings. - # see https://mail.python.org/pipermail/python-list/2013-June/650460.html - # for more details and the following solution. - props = termios.tcgetattr(fd) - props[1] = props[1] & (~termios.ONLCR) | termios.ONLRET - termios.tcsetattr(fd, termios.TCSANOW, props) - - -def _update_last_spec(last): - captured = last.captured - last.last_in_pipeline = True - if not captured: - return - callable_alias = callable(last.alias) - if callable_alias: - pass else: - cmds_cache = builtins.__xonsh__.commands_cache - thable = cmds_cache.predict_threadable( - last.args - ) and cmds_cache.predict_threadable(last.cmd) - if captured and thable: - last.cls = PopenThread - elif not thable: - # foreground processes should use Popen - last.threadable = False - if captured == "object" or captured == "hiddenobject": - # CommandPipeline objects should not pipe stdout, stderr - return - # cannot used PTY pipes for aliases, for some dark reason, - # and must use normal pipes instead. - use_tty = ON_POSIX and not callable_alias - # Do not set standard in! Popen is not a fan of redirections here - # set standard out - if last.stdout is not None: - last.universal_newlines = True - elif captured in STDOUT_CAPTURE_KINDS: - last.universal_newlines = False - r, w = os.pipe() - last.stdout = safe_open(w, "wb") - last.captured_stdout = safe_open(r, "rb") - elif builtins.__xonsh__.stdout_uncaptured is not None: - last.universal_newlines = True - last.stdout = builtins.__xonsh__.stdout_uncaptured - last.captured_stdout = last.stdout - elif ON_WINDOWS and not callable_alias: - last.universal_newlines = True - last.stdout = None # must truly stream on windows - last.captured_stdout = ConsoleParallelReader(1) - else: - last.universal_newlines = True - r, w = pty.openpty() if use_tty else os.pipe() - _safe_pipe_properties(w, use_tty=use_tty) - last.stdout = safe_open(w, "w") - _safe_pipe_properties(r, use_tty=use_tty) - last.captured_stdout = safe_open(r, "r") - # set standard error - if last.stderr is not None: - pass - elif captured == "object": - r, w = os.pipe() - last.stderr = safe_open(w, "w") - last.captured_stderr = safe_open(r, "r") - elif builtins.__xonsh__.stderr_uncaptured is not None: - last.stderr = builtins.__xonsh__.stderr_uncaptured - last.captured_stderr = last.stderr - elif ON_WINDOWS and not callable_alias: - last.universal_newlines = True - last.stderr = None # must truly stream on windows - else: - r, w = pty.openpty() if use_tty else os.pipe() - _safe_pipe_properties(w, use_tty=use_tty) - last.stderr = safe_open(w, "w") - _safe_pipe_properties(r, use_tty=use_tty) - last.captured_stderr = safe_open(r, "r") - # redirect stdout to stderr, if we should - if isinstance(last.stdout, int) and last.stdout == 2: - # need to use private interface to avoid duplication. 
- last._stdout = last.stderr - # redirect stderr to stdout, if we should - if callable_alias and last.stderr == subprocess.STDOUT: - last._stderr = last.stdout - last.captured_stderr = last.captured_stdout - - -def cmds_to_specs(cmds, captured=False): - """Converts a list of cmds to a list of SubprocSpec objects that are - ready to be executed. - """ - # first build the subprocs independently and separate from the redirects - i = 0 - specs = [] - redirects = [] - for cmd in cmds: - if isinstance(cmd, str): - redirects.append(cmd) - else: - if cmd[-1] == "&": - cmd = cmd[:-1] - redirects.append("&") - spec = SubprocSpec.build(cmd, captured=captured) - spec.pipeline_index = i - specs.append(spec) - i += 1 - # now modify the subprocs based on the redirects. - for i, redirect in enumerate(redirects): - if redirect == "|": - # these should remain integer file descriptors, and not Python - # file objects since they connect processes. - r, w = os.pipe() - specs[i].stdout = w - specs[i + 1].stdin = r - elif redirect == "&" and i == len(redirects) - 1: - specs[-1].background = True - else: - raise XonshError("unrecognized redirect {0!r}".format(redirect)) - # Apply boundary conditions - _update_last_spec(specs[-1]) - return specs - - -def _should_set_title(captured=False): - env = builtins.__xonsh__.env - return ( - env.get("XONSH_INTERACTIVE") - and not env.get("XONSH_STORE_STDOUT") - and captured not in STDOUT_CAPTURE_KINDS - and builtins.__xonsh__.shell is not None - ) + raise XonshError('Unrecognized redirection command: {}'.format(r)) -def run_subproc(cmds, captured=False): +def run_subproc(cmds, captured=True): """Runs a subprocess, in its many forms. This takes a list of 'commands,' which may be a list of command line arguments or a string, representing a special connecting character. For example:: @@ -909,82 +472,153 @@ def run_subproc(cmds, captured=False): Lastly, the captured argument affects only the last real command. """ - specs = cmds_to_specs(cmds, captured=captured) - captured = specs[-1].captured - if captured == "hiddenobject": - command = HiddenCommandPipeline(specs) - else: - command = CommandPipeline(specs) - proc = command.proc - background = command.spec.background - if not all(x.is_proxy for x in specs): - add_job( - { - "cmds": cmds, - "pids": [i.pid for i in command.procs], - "obj": proc, - "bg": background, - "pipeline": command, - "pgrp": command.term_pgid, - } - ) - if _should_set_title(captured=captured): - # set title here to get currently executing command - pause_call_resume(proc, builtins.__xonsh__.shell.settitle) - else: - # for some reason, some programs are in a stopped state when the flow - # reaches this point, hence a SIGCONT should be sent to `proc` to make - # sure that the shell doesn't hang. This `pause_call_resume` invocation - # does this - pause_call_resume(proc, int) - # create command or return if backgrounding. 
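However the pipeline is wired, the mechanics are those of cmds_to_specs() above: a pipe connects each stage's stdout to the next stage's stdin, and the parent closes its copies so end-of-file and SIGPIPE propagate. Wiring 'ls | wc -l' by hand looks roughly like:

    import subprocess

    p1 = subprocess.Popen(['ls'], stdout=subprocess.PIPE)
    p2 = subprocess.Popen(['wc', '-l'], stdin=p1.stdout,
                          stdout=subprocess.PIPE)
    p1.stdout.close()          # let p1 see SIGPIPE if p2 exits early
    out, _ = p2.communicate()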
+ global ENV + background = False + if cmds[-1] == '&': + background = True + cmds = cmds[:-1] + write_target = None + last_cmd = len(cmds) - 1 + prev = None + procs = [] + prev_proc = None + for ix, cmd in enumerate(cmds): + stdin = None + stdout = None + stderr = None + if isinstance(cmd, string_types): + prev = cmd + continue + streams = {} + while True: + if len(cmd) >= 3 and _is_redirect(cmd[-2]): + _redirect_io(streams, cmd[-2], cmd[-1]) + cmd = cmd[:-2] + elif len(cmd) >= 2 and _is_redirect(cmd[-1]): + _redirect_io(streams, cmd[-1]) + cmd = cmd[:-1] + elif len(cmd) >= 3 and cmd[0] == '<': + _redirect_io(streams, cmd[0], cmd[1]) + cmd = cmd[2:] + else: + break + # set standard input + if 'stdin' in streams: + if prev_proc is not None: + raise XonshError('Multiple inputs for stdin') + stdin = streams['stdin'] + elif prev_proc is not None: + stdin = prev_proc.stdout + # set standard output + if 'stdout' in streams: + if ix != last_cmd: + raise XonshError('Multiple redirects for stdout') + stdout = streams['stdout'] + elif captured or ix != last_cmd: + stdout = PIPE + else: + stdout = None + # set standard error + if 'stderr' in streams: + stderr = streams['stderr'] + uninew = (ix == last_cmd) and (not captured) + alias = builtins.aliases.get(cmd[0], None) + if callable(alias): + aliased_cmd = alias + else: + if alias is not None: + cmd = alias + cmd[1:] + n = _get_runnable_name(cmd[0]) + if n is None: + aliased_cmd = cmd + else: + try: + aliased_cmd = get_script_subproc_command(n, cmd[1:]) + except PermissionError: + e = 'xonsh: subprocess mode: permission denied: {0}' + raise XonshError(e.format(cmd[0])) + if callable(aliased_cmd): + prev_is_proxy = True + numargs = len(inspect.signature(aliased_cmd).parameters) + if numargs == 2: + cls = SimpleProcProxy + elif numargs == 4: + cls = ProcProxy + else: + e = 'Expected callable with 2 or 4 arguments, not {}' + raise XonshError(e.format(numargs)) + proc = cls(aliased_cmd, cmd[1:], + stdin, stdout, stderr, + universal_newlines=uninew) + else: + prev_is_proxy = False + usetee = (stdout is None) and (not background) and \ + ENV.get('XONSH_STORE_STDOUT', False) + cls = TeePTYProc if usetee else Popen + subproc_kwargs = {} + if ON_POSIX and cls is Popen: + subproc_kwargs['preexec_fn'] = _subproc_pre + try: + proc = cls(aliased_cmd, + universal_newlines=uninew, + env=ENV.detype(), + stdin=stdin, + stdout=stdout, + stderr=stderr, + **subproc_kwargs) + except PermissionError: + e = 'xonsh: subprocess mode: permission denied: {0}' + raise XonshError(e.format(aliased_cmd[0])) + except FileNotFoundError: + cmd = aliased_cmd[0] + e = 'xonsh: subprocess mode: command not found: {0}'.format(cmd) + sug = suggest_commands(cmd, ENV, builtins.aliases) + if len(sug.strip()) > 0: + e += '\n' + suggest_commands(cmd, ENV, builtins.aliases) + raise XonshError(e) + procs.append(proc) + prev = None + prev_proc = proc + for proc in procs[:-1]: + try: + proc.stdout.close() + except OSError: + pass + if not prev_is_proxy: + add_job({ + 'cmds': cmds, + 'pids': [i.pid for i in procs], + 'obj': prev_proc, + 'bg': background + }) if background: return - # now figure out what we should return. 
- if captured == "stdout": - command.end() - return command.output - elif captured == "object": - return command - elif captured == "hiddenobject": - command.end() - return command - else: - command.end() - return + if prev_is_proxy: + prev_proc.wait() + wait_for_active_job() + hist = builtins.__xonsh_history__ + hist.last_cmd_rtn = prev_proc.returncode + if write_target is None: + # get output + output = '' + if prev_proc.stdout not in (None, sys.stdout): + output = prev_proc.stdout.read() + if captured: + # to get proper encoding from Popen, we have to + # use a byte stream and then implement universal_newlines here + output = output.decode(encoding=ENV.get('XONSH_ENCODING'), + errors=ENV.get('XONSH_ENCODING_ERRORS')) + output = output.replace('\r\n', '\n') + return output + else: + hist.last_cmd_out = output -def subproc_captured_stdout(*cmds): +def subproc_captured(*cmds): """Runs a subprocess, capturing the output. Returns the stdout that was produced as a str. """ - return run_subproc(cmds, captured="stdout") - - -def subproc_captured_inject(*cmds): - """Runs a subprocess, capturing the output. Returns a list of - whitespace-separated strings of the stdout that was produced. - The string is split using xonsh's lexer, rather than Python's str.split() - or shlex.split(). - """ - s = run_subproc(cmds, captured="stdout") - toks = builtins.__xonsh__.execer.parser.lexer.split(s.strip()) - return toks - - -def subproc_captured_object(*cmds): - """ - Runs a subprocess, capturing the output. Returns an instance of - CommandPipeline representing the completed command. - """ - return run_subproc(cmds, captured="object") - - -def subproc_captured_hiddenobject(*cmds): - """Runs a subprocess, capturing the output. Returns an instance of - HiddenCommandPipeline representing the completed command. 
- """ - return run_subproc(cmds, captured="hiddenobject") + return run_subproc(cmds, captured=True) def subproc_uncaptured(*cmds): @@ -996,650 +630,103 @@ def subproc_uncaptured(*cmds): def ensure_list_of_strs(x): """Ensures that x is a list of strings.""" - if isinstance(x, str): - rtn = [x] - elif isinstance(x, cabc.Sequence): - rtn = [i if isinstance(i, str) else str(i) for i in x] - else: - rtn = [str(x)] - return rtn - - -def list_of_strs_or_callables(x): - """Ensures that x is a list of strings or functions""" - if isinstance(x, str) or callable(x): + if isinstance(x, string_types): rtn = [x] - elif isinstance(x, cabc.Iterable): - rtn = [i if isinstance(i, str) or callable(i) else str(i) for i in x] + elif isinstance(x, Sequence): + rtn = [i if isinstance(i, string_types) else str(i) for i in x] else: rtn = [str(x)] return rtn -def list_of_list_of_strs_outer_product(x): - """Takes an outer product of a list of strings""" - lolos = map(ensure_list_of_strs, x) - rtn = [] - for los in itertools.product(*lolos): - s = "".join(los) - if "*" in s: - rtn.extend(builtins.__xonsh__.glob(s)) - else: - rtn.append(builtins.__xonsh__.expand_path(s)) - return rtn - - -@lazyobject -def MACRO_FLAG_KINDS(): - return { - "s": str, - "str": str, - "string": str, - "a": AST, - "ast": AST, - "c": types.CodeType, - "code": types.CodeType, - "compile": types.CodeType, - "v": eval, - "eval": eval, - "x": exec, - "exec": exec, - "t": type, - "type": type, - } - - -def _convert_kind_flag(x): - """Puts a kind flag (string) a canonical form.""" - x = x.lower() - kind = MACRO_FLAG_KINDS.get(x, None) - if kind is None: - raise TypeError("{0!r} not a recognized macro type.".format(x)) - return kind - - -def convert_macro_arg(raw_arg, kind, glbs, locs, *, name="", macroname=""): - """Converts a string macro argument based on the requested kind. - - Parameters - ---------- - raw_arg : str - The str representation of the macro argument. - kind : object - A flag or type representing how to convert the argument. - glbs : Mapping - The globals from the call site. - locs : Mapping or None - The locals from the call site. - name : str, optional - The macro argument name. - macroname : str, optional - The name of the macro itself. - - Returns - ------- - The converted argument. 
- """ - # munge kind and mode to start - mode = None - if isinstance(kind, cabc.Sequence) and not isinstance(kind, str): - # have (kind, mode) tuple - kind, mode = kind - if isinstance(kind, str): - kind = _convert_kind_flag(kind) - if kind is str or kind is None: - return raw_arg # short circuit since there is nothing else to do - # select from kind and convert - execer = builtins.__xonsh__.execer - filename = macroname + "(" + name + ")" - if kind is AST: - ctx = set(dir(builtins)) | set(glbs.keys()) - if locs is not None: - ctx |= set(locs.keys()) - mode = mode or "eval" - if mode != "eval" and not raw_arg.endswith("\n"): - raw_arg += "\n" - arg = execer.parse(raw_arg, ctx, mode=mode, filename=filename) - elif kind is types.CodeType or kind is compile: # NOQA - mode = mode or "eval" - arg = execer.compile( - raw_arg, mode=mode, glbs=glbs, locs=locs, filename=filename - ) - elif kind is eval: - arg = execer.eval(raw_arg, glbs=glbs, locs=locs, filename=filename) - elif kind is exec: - mode = mode or "exec" - if not raw_arg.endswith("\n"): - raw_arg += "\n" - arg = execer.exec(raw_arg, mode=mode, glbs=glbs, locs=locs, filename=filename) - elif kind is type: - arg = type(execer.eval(raw_arg, glbs=glbs, locs=locs, filename=filename)) - else: - msg = "kind={0!r} and mode={1!r} was not recognized for macro " "argument {2!r}" - raise TypeError(msg.format(kind, mode, name)) - return arg - - -@contextlib.contextmanager -def in_macro_call(f, glbs, locs): - """Attaches macro globals and locals temporarily to function as a - context manager. - - Parameters - ---------- - f : callable object - The function that is called as ``f(*args)``. - glbs : Mapping - The globals from the call site. - locs : Mapping or None - The locals from the call site. - """ - prev_glbs = getattr(f, "macro_globals", None) - prev_locs = getattr(f, "macro_locals", None) - f.macro_globals = glbs - f.macro_locals = locs - yield - if prev_glbs is None: - del f.macro_globals - else: - f.macro_globals = prev_glbs - if prev_locs is None: - del f.macro_locals - else: - f.macro_locals = prev_locs - - -def call_macro(f, raw_args, glbs, locs): - """Calls a function as a macro, returning its result. - - Parameters - ---------- - f : callable object - The function that is called as ``f(*args)``. - raw_args : tuple of str - The str representation of arguments of that were passed into the - macro. These strings will be parsed, compiled, evaled, or left as - a string depending on the annotations of f. - glbs : Mapping - The globals from the call site. - locs : Mapping or None - The locals from the call site. 
- """ - sig = inspect.signature(f) - empty = inspect.Parameter.empty - macroname = f.__name__ - i = 0 - args = [] - for (key, param), raw_arg in zip(sig.parameters.items(), raw_args): - i += 1 - if raw_arg == "*": - break - kind = param.annotation - if kind is empty or kind is None: - kind = str - arg = convert_macro_arg( - raw_arg, kind, glbs, locs, name=key, macroname=macroname - ) - args.append(arg) - reg_args, kwargs = _eval_regular_args(raw_args[i:], glbs, locs) - args += reg_args - with in_macro_call(f, glbs, locs): - rtn = f(*args, **kwargs) - return rtn - - -@lazyobject -def KWARG_RE(): - return re.compile(r"([A-Za-z_]\w*=|\*\*)") - - -def _starts_as_arg(s): - """Tests if a string starts as a non-kwarg string would.""" - return KWARG_RE.match(s) is None - - -def _eval_regular_args(raw_args, glbs, locs): - if not raw_args: - return [], {} - arglist = list(itertools.takewhile(_starts_as_arg, raw_args)) - kwarglist = raw_args[len(arglist) :] - execer = builtins.__xonsh__.execer - if not arglist: - args = arglist - kwargstr = "dict({})".format(", ".join(kwarglist)) - kwargs = execer.eval(kwargstr, glbs=glbs, locs=locs) - elif not kwarglist: - argstr = "({},)".format(", ".join(arglist)) - args = execer.eval(argstr, glbs=glbs, locs=locs) - kwargs = {} - else: - argstr = "({},)".format(", ".join(arglist)) - kwargstr = "dict({})".format(", ".join(kwarglist)) - both = "({}, {})".format(argstr, kwargstr) - args, kwargs = execer.eval(both, glbs=glbs, locs=locs) - return args, kwargs - - -def enter_macro(obj, raw_block, glbs, locs): - """Prepares to enter a context manager macro by attaching the contents - of the macro block, globals, and locals to the object. These modifications - are made in-place and the original object is returned. - - - Parameters - ---------- - obj : context manager - The object that is about to be entered via a with-statement. - raw_block : str - The str of the block that is the context body. - This string will be parsed, compiled, evaled, or left as - a string depending on the return annotation of obj.__enter__. - glbs : Mapping - The globals from the context site. - locs : Mapping or None - The locals from the context site. - - Returns - ------- - obj : context manager - The same context manager but with the new macro information applied. - """ - # recurse down sequences - if isinstance(obj, cabc.Sequence): - for x in obj: - enter_macro(x, raw_block, glbs, locs) - return obj - # convert block as needed - kind = getattr(obj, "__xonsh_block__", str) - macroname = getattr(obj, "__name__", "") - block = convert_macro_arg( - raw_block, kind, glbs, locs, name="", macroname=macroname - ) - # attach attrs - obj.macro_globals = glbs - obj.macro_locals = locs - obj.macro_block = block - return obj - - -def load_builtins(execer=None, ctx=None): +def load_builtins(execer=None): """Loads the xonsh builtins into the Python builtins. Sets the BUILTINS_LOADED variable to True. 
""" - global BUILTINS_LOADED - if not hasattr(builtins, "__xonsh__"): - builtins.__xonsh__ = XonshSession(execer=execer, ctx=ctx) - builtins.__xonsh__.load(execer=execer, ctx=ctx) - builtins.__xonsh__.link_builtins(execer=execer) + global BUILTINS_LOADED, ENV + # private built-ins + builtins.__xonsh_env__ = ENV = Env(default_env()) + builtins.__xonsh_ctx__ = {} + builtins.__xonsh_help__ = helper + builtins.__xonsh_superhelp__ = superhelper + builtins.__xonsh_regexpath__ = regexpath + builtins.__xonsh_glob__ = globpath + builtins.__xonsh_exit__ = False + if hasattr(builtins, 'exit'): + builtins.__xonsh_pyexit__ = builtins.exit + del builtins.exit + if hasattr(builtins, 'quit'): + builtins.__xonsh_pyquit__ = builtins.quit + del builtins.quit + builtins.__xonsh_subproc_captured__ = subproc_captured + builtins.__xonsh_subproc_uncaptured__ = subproc_uncaptured + builtins.__xonsh_execer__ = execer + builtins.__xonsh_all_jobs__ = {} + builtins.__xonsh_active_job__ = None + builtins.__xonsh_ensure_list_of_strs__ = ensure_list_of_strs + # public built-ins + builtins.evalx = None if execer is None else execer.eval + builtins.execx = None if execer is None else execer.exec + builtins.compilex = None if execer is None else execer.compile + builtins.default_aliases = builtins.aliases = Aliases(DEFAULT_ALIASES) + builtins.aliases.update(load_foreign_aliases(issue_warning=False)) + # history needs to be started after env and aliases + # would be nice to actually include non-detyped versions. + builtins.__xonsh_history__ = History(env=ENV.detype(), #aliases=builtins.aliases, + ts=[time.time(), None], locked=True) + lastflush = lambda s=None, f=None: builtins.__xonsh_history__.flush(at_exit=True) + atexit.register(lastflush) + for sig in AT_EXIT_SIGNALS: + resetting_signal_handle(sig, lastflush) BUILTINS_LOADED = True -def _lastflush(s=None, f=None): - if hasattr(builtins, "__xonsh__"): - if builtins.__xonsh__.history is not None: - builtins.__xonsh__.history.flush(at_exit=True) - - def unload_builtins(): """Removes the xonsh builtins from the Python builtins, if the BUILTINS_LOADED is True, sets BUILTINS_LOADED to False, and returns. 
""" - global BUILTINS_LOADED - if not hasattr(builtins, "__xonsh__"): - BUILTINS_LOADED = False - return - env = getattr(builtins.__xonsh__, "env", None) - if isinstance(env, Env): - env.undo_replace_env() - if hasattr(builtins.__xonsh__, "pyexit"): - builtins.exit = builtins.__xonsh__.pyexit - if hasattr(builtins.__xonsh__, "pyquit"): - builtins.quit = builtins.__xonsh__.pyquit + global BUILTINS_LOADED, ENV + if ENV is not None: + ENV.undo_replace_env() + ENV = None + if hasattr(builtins, '__xonsh_pyexit__'): + builtins.exit = builtins.__xonsh_pyexit__ + if hasattr(builtins, '__xonsh_pyquit__'): + builtins.quit = builtins.__xonsh_pyquit__ if not BUILTINS_LOADED: return - builtins.__xonsh__.unlink_builtins() - delattr(builtins, "__xonsh__") + names = ['__xonsh_env__', + '__xonsh_ctx__', + '__xonsh_help__', + '__xonsh_superhelp__', + '__xonsh_regexpath__', + '__xonsh_glob__', + '__xonsh_exit__', + '__xonsh_pyexit__', + '__xonsh_pyquit__', + '__xonsh_subproc_captured__', + '__xonsh_subproc_uncaptured__', + '__xonsh_execer__', + 'evalx', + 'execx', + 'compilex', + 'default_aliases', + '__xonsh_all_jobs__', + '__xonsh_active_job__', + '__xonsh_ensure_list_of_strs__', + '__xonsh_history__', + ] + for name in names: + if hasattr(builtins, name): + delattr(builtins, name) BUILTINS_LOADED = False -@contextlib.contextmanager +@contextmanager def xonsh_builtins(execer=None): """A context manager for using the xonsh builtins only in a limited scope. Likely useful in testing. """ load_builtins(execer=execer) - # temporary shims for old __xonsh_*__ builtins - load_proxies() yield - # temporary shims for old __xonsh_*__ builtins - unload_proxies() unload_builtins() - - -class XonshSession: - """All components defining a xonsh session. - - """ - - def __init__(self, execer=None, ctx=None): - """ - Parameters - --------- - execer : Execer, optional - Xonsh execution object, may be None to start - ctx : Mapping, optional - Context to start xonsh session with. - """ - self.execer = execer - self.ctx = {} if ctx is None else ctx - - def load(self, execer=None, ctx=None): - """Loads the session with default values. - - Parameters - --------- - execer : Execer, optional - Xonsh execution object, may be None to start - ctx : Mapping, optional - Context to start xonsh session with. 
- """ - if ctx is not None: - self.ctx = ctx - self.env = Env(default_env()) - self.help = helper - self.superhelp = superhelper - self.pathsearch = pathsearch - self.globsearch = globsearch - self.regexsearch = regexsearch - self.glob = globpath - self.expand_path = expand_path - self.exit = False - self.stdout_uncaptured = None - self.stderr_uncaptured = None - - if hasattr(builtins, "exit"): - self.pyexit = builtins.exit - del builtins.exit - - if hasattr(builtins, "quit"): - self.pyquit = builtins.quit - del builtins.quit - - self.subproc_captured_stdout = subproc_captured_stdout - self.subproc_captured_inject = subproc_captured_inject - self.subproc_captured_object = subproc_captured_object - self.subproc_captured_hiddenobject = subproc_captured_hiddenobject - self.subproc_uncaptured = subproc_uncaptured - self.execer = execer - self.commands_cache = CommandsCache() - self.all_jobs = {} - self.ensure_list_of_strs = ensure_list_of_strs - self.list_of_strs_or_callables = list_of_strs_or_callables - - self.list_of_list_of_strs_outer_product = list_of_list_of_strs_outer_product - - self.completers = xonsh.completers.init.default_completers() - self.call_macro = call_macro - self.enter_macro = enter_macro - self.path_literal = path_literal - - self.builtins = _BuiltIns(execer) - - self.history = None - self.shell = None - - def link_builtins(self, execer=None): - # public built-ins - builtins.XonshError = self.builtins.XonshError - builtins.XonshCalledProcessError = self.builtins.XonshCalledProcessError - builtins.evalx = None if execer is None else execer.eval - builtins.execx = None if execer is None else execer.exec - builtins.compilex = None if execer is None else execer.compile - builtins.events = self.builtins.events - - # sneak the path search functions into the aliases - # Need this inline/lazy import here since we use locate_binary that - # relies on __xonsh__.env in default aliases - builtins.default_aliases = builtins.aliases = Aliases(make_default_aliases()) - atexit.register(_lastflush) - for sig in AT_EXIT_SIGNALS: - resetting_signal_handle(sig, _lastflush) - - def unlink_builtins(self): - names = [ - "XonshError", - "XonshCalledProcessError", - "evalx", - "execx", - "compilex", - "default_aliases", - ] - - for name in names: - if hasattr(builtins, name): - delattr(builtins, name) - - -class _BuiltIns: - def __init__(self, execer=None): - # public built-ins - self.XonshError = XonshError - self.XonshCalledProcessError = XonshCalledProcessError - self.evalx = None if execer is None else execer.eval - self.execx = None if execer is None else execer.exec - self.compilex = None if execer is None else execer.compile - self.events = events - - -class DynamicAccessProxy: - """Proxies access dynamically.""" - - def __init__(self, refname, objname): - """ - Parameters - ---------- - refname : str - '.'-separated string that represents the new, reference name that - the user will access. - objname : str - '.'-separated string that represents the name where the target - object actually lives that refname points to. 
- """ - super().__setattr__("refname", refname) - super().__setattr__("objname", objname) - - @property - def obj(self): - """Dynamically grabs object""" - names = self.objname.split(".") - obj = builtins - for name in names: - obj = getattr(obj, name) - return obj - - def __getattr__(self, name): - return getattr(self.obj, name) - - def __setattr__(self, name, value): - return super().__setattr__(self.obj, name, value) - - def __delattr__(self, name): - return delattr(self.obj, name) - - def __getitem__(self, item): - return self.obj.__getitem__(item) - - def __setitem__(self, item, value): - return self.obj.__setitem__(item, value) - - def __delitem__(self, item): - del self.obj[item] - - def __call__(self, *args, **kwargs): - return self.obj.__call__(*args, **kwargs) - - -class DeprecationWarningProxy: - """Proxies access, but warns in the process.""" - - def __init__(self, oldname, newname): - super().__setattr__("oldname", oldname) - super().__setattr__("newname", newname) - - @property - def obj(self): - """Dynamically grabs object""" - names = self.newname.split(".") - obj = builtins - for name in names: - obj = getattr(obj, name) - return obj - - def warn(self): - """Issues deprecation warning.""" - warnings.warn( - "{} has been deprecated, please use {} instead.".format( - self.oldname, self.newname - ), - DeprecationWarning, - stacklevel=3, - ) - - def __getattr__(self, name): - self.warn() - return getattr(self.obj, name) - - def __setattr__(self, name, value): - self.warn() - return super().__setattr__(self.obj, name, value) - - def __delattr__(self, name): - self.warn() - return delattr(self.obj, name) - - def __getitem__(self, item): - self.warn() - return self.obj.__getitem__(item) - - def __setitem__(self, item, value): - self.warn() - return self.obj.__setitem__(item, value) - - def __delitem__(self, item): - self.warn() - del self.obj[item] - - def __call__(self, *args, **kwargs): - self.warn() - return self.obj.__call__(*args, **kwargs) - - -def load_proxies(): - """Loads builtin dynamic access proxies. - Also puts temporary shims in place for `__xonsh_*__` builtins. 
- """ - proxy_mapping = { - "XonshError": "__xonsh__.builtins.XonshError", - "XonshCalledProcessError": "__xonsh__.builtins.XonshCalledProcessError", - "evalx": "__xonsh__.builtins.evalx", - "execx": "__xonsh__.builtins.execx", - "compilex": "__xonsh__.builtins.compilex", - "events": "__xonsh__.builtins.events", - } - for refname, objname in proxy_mapping.items(): - proxy = DynamicAccessProxy(refname, objname) - setattr(builtins, refname, proxy) - - deprecated_mapping = { - "__xonsh_env__": "__xonsh__.env", - "__xonsh_history__": "__xonsh__.history", - "__xonsh_ctx__": "__xonsh__.ctx", - "__xonsh_help__": "__xonsh__.help", - "__xonsh_superhelp__": "__xonsh__.superhelp", - "__xonsh_pathsearch__": "__xonsh__.pathsearch", - "__xonsh_globsearch__": "__xonsh__.globsearch", - "__xonsh_regexsearch__": "__xonsh__.regexsearch", - "__xonsh_glob__": "__xonsh__.glob", - "__xonsh_expand_path__": "__xonsh__.expand_path", - "__xonsh_exit__": "__xonsh__.exit", - "__xonsh_stdout_uncaptured__": "__xonsh__.stdout_uncaptured", - "__xonsh_stderr_uncaptured__": "__xonsh__.stderr_uncaptured", - "__xonsh_subproc_captured_stdout__": "__xonsh__.subproc_captured_stdout", - "__xonsh_subproc_captured_inject__": "__xonsh__.subproc_captured_inject", - "__xonsh_subproc_captured_object__": "__xonsh__.subproc_captured_object", - "__xonsh_subproc_captured_hiddenobject__": "__xonsh__.subproc_captured_hiddenobject", - "__xonsh_subproc_uncaptured__": "__xonsh__.subproc_uncaptured", - "__xonsh_execer__": "__xonsh__.execer", - "__xonsh_commands_cache__": "__xonsh__.commands_cache", - "__xonsh_all_jobs__": "__xonsh__.all_jobs", - "__xonsh_ensure_list_of_strs__": "__xonsh__.ensure_list_of_strs", - "__xonsh_list_of_strs_or_callables__": "__xonsh__.list_of_strs_or_callables", - "__xonsh_list_of_list_of_strs_outer_product__": "__xonsh__.list_of_list_of_strs_outer_product", - "__xonsh_completers__": "__xonsh__.completers", - "__xonsh_call_macro__": "__xonsh__.call_macro", - "__xonsh_enter_macro__": "__xonsh__.enter_macro", - "__xonsh_path_literal__": "__xonsh__.path_literal", - } - for badname, goodname in deprecated_mapping.items(): - proxy = DeprecationWarningProxy(badname, goodname) - setattr(builtins, badname, proxy) - - if hasattr(builtins.__xonsh__, "pyexit"): - builtins.__xonsh_pyexit__ = DeprecationWarningProxy( - "builtins.__xonsh_pyexit__", "builtins.__xonsh__.pyexit" - ) - if hasattr(builtins.__xonsh__, "quit"): - builtins.__xonsh_pyquit__ = DeprecationWarningProxy( - "builtins.__xonsh_pyquit__", "builtins.__xonsh__.pyquit" - ) - - -def unload_proxies(): - """Removes the xonsh builtins (proxies) from the Python builtins. 
- """ - if hasattr(builtins, "__xonsh_pyexit__"): - builtins.exit = builtins.__xonsh_pyexit__ - if hasattr(builtins, "__xonsh_pyquit__"): - builtins.quit = builtins.__xonsh_pyquit__ - - names = [ - "__xonsh_env__", - "__xonsh_ctx__", - "__xonsh_help__", - "__xonsh_superhelp__", - "__xonsh_pathsearch__", - "__xonsh_globsearch__", - "__xonsh_regexsearch__", - "__xonsh_glob__", - "__xonsh_expand_path__", - "__xonsh_exit__", - "__xonsh_stdout_uncaptured__", - "__xonsh_stderr_uncaptured__", - "__xonsh_pyexit__", - "__xonsh_pyquit__", - "__xonsh_subproc_captured_stdout__", - "__xonsh_subproc_captured_inject__", - "__xonsh_subproc_captured_object__", - "__xonsh_subproc_captured_hiddenobject__", - "__xonsh_subproc_uncaptured__", - "__xonsh_execer__", - "__xonsh_commands_cache__", - "__xonsh_completers__", - "__xonsh_call_macro__", - "__xonsh_enter_macro__", - "__xonsh_path_literal__", - "XonshError", - "XonshCalledProcessError", - "evalx", - "execx", - "compilex", - "default_aliases", - "__xonsh_all_jobs__", - "__xonsh_ensure_list_of_strs__", - "__xonsh_list_of_strs_or_callables__", - "__xonsh_list_of_list_of_strs_outer_product__", - "__xonsh_history__", - ] - for name in names: - if hasattr(builtins, name): - delattr(builtins, name) diff --git a/xonsh/codecache.py b/xonsh/codecache.py deleted file mode 100644 index 32dcfc9..0000000 --- a/xonsh/codecache.py +++ /dev/null @@ -1,209 +0,0 @@ -"""Tools for caching xonsh code.""" -import os -import sys -import hashlib -import marshal -import builtins - -from xonsh import __version__ as XONSH_VERSION -from xonsh.lazyasd import lazyobject -from xonsh.platform import PYTHON_VERSION_INFO_BYTES - - -def _splitpath(path, sofar=[]): - folder, path = os.path.split(path) - if path == "": - return sofar[::-1] - elif folder == "": - return (sofar + [path])[::-1] - else: - return _splitpath(folder, sofar + [path]) - - -@lazyobject -def _CHARACTER_MAP(): - cmap = {chr(o): "_%s" % chr(o + 32) for o in range(65, 91)} - cmap.update({".": "_.", "_": "__"}) - return cmap - - -def _cache_renamer(path, code=False): - if not code: - path = os.path.realpath(path) - o = ["".join(_CHARACTER_MAP.get(i, i) for i in w) for w in _splitpath(path)] - o[-1] = "{}.{}".format(o[-1], sys.implementation.cache_tag) - return o - - -def _make_if_not_exists(dirname): - if not os.path.isdir(dirname): - os.makedirs(dirname) - - -def should_use_cache(execer, mode): - """ - Return ``True`` if caching has been enabled for this mode (through command - line flags or environment variables) - """ - if mode == "exec": - return (execer.scriptcache or execer.cacheall) and ( - builtins.__xonsh__.env["XONSH_CACHE_SCRIPTS"] - or builtins.__xonsh__.env["XONSH_CACHE_EVERYTHING"] - ) - else: - return execer.cacheall or builtins.__xonsh__.env["XONSH_CACHE_EVERYTHING"] - - -def run_compiled_code(code, glb, loc, mode): - """ - Helper to run code in a given mode and context - """ - if code is None: - return - if mode in {"exec", "single"}: - func = exec - else: - func = eval - func(code, glb, loc) - - -def get_cache_filename(fname, code=True): - """ - Return the filename of the cache for the given filename. - - Cache filenames are similar to those used by the Mercurial DVCS for its - internal store. - - The ``code`` switch should be true if we should use the code store rather - than the script store. 
- """ - datadir = builtins.__xonsh__.env["XONSH_DATA_DIR"] - cachedir = os.path.join( - datadir, "xonsh_code_cache" if code else "xonsh_script_cache" - ) - cachefname = os.path.join(cachedir, *_cache_renamer(fname, code=code)) - return cachefname - - -def update_cache(ccode, cache_file_name): - """ - Update the cache at ``cache_file_name`` to contain the compiled code - represented by ``ccode``. - """ - if cache_file_name is not None: - _make_if_not_exists(os.path.dirname(cache_file_name)) - with open(cache_file_name, "wb") as cfile: - cfile.write(XONSH_VERSION.encode() + b"\n") - cfile.write(bytes(PYTHON_VERSION_INFO_BYTES) + b"\n") - marshal.dump(ccode, cfile) - - -def _check_cache_versions(cfile): - # version data should be < 1 kb - ver = cfile.readline(1024).strip() - if ver != XONSH_VERSION.encode(): - return False - ver = cfile.readline(1024).strip() - return ver == PYTHON_VERSION_INFO_BYTES - - -def compile_code(filename, code, execer, glb, loc, mode): - """ - Wrapper for ``execer.compile`` to compile the given code - """ - try: - if not code.endswith("\n"): - code += "\n" - old_filename = execer.filename - execer.filename = filename - ccode = execer.compile(code, glbs=glb, locs=loc, mode=mode, filename=filename) - except Exception: - raise - finally: - execer.filename = old_filename - return ccode - - -def script_cache_check(filename, cachefname): - """ - Check whether the script cache for a particular file is valid. - - Returns a tuple containing: a boolean representing whether the cached code - should be used, and the cached code (or ``None`` if the cache should not be - used). - """ - ccode = None - run_cached = False - if os.path.isfile(cachefname): - if os.stat(cachefname).st_mtime >= os.stat(filename).st_mtime: - with open(cachefname, "rb") as cfile: - if not _check_cache_versions(cfile): - return False, None - ccode = marshal.load(cfile) - run_cached = True - return run_cached, ccode - - -def run_script_with_cache(filename, execer, glb=None, loc=None, mode="exec"): - """ - Run a script, using a cached version if it exists (and the source has not - changed), and updating the cache as necessary. - """ - run_cached = False - use_cache = should_use_cache(execer, mode) - cachefname = get_cache_filename(filename, code=False) - if use_cache: - run_cached, ccode = script_cache_check(filename, cachefname) - if not run_cached: - with open(filename, "r") as f: - code = f.read() - ccode = compile_code(filename, code, execer, glb, loc, mode) - update_cache(ccode, cachefname) - run_compiled_code(ccode, glb, loc, mode) - - -def code_cache_name(code): - """ - Return an appropriate spoofed filename for the given code. - """ - if isinstance(code, str): - _code = code.encode() - else: - _code = code - return hashlib.md5(_code).hexdigest() - - -def code_cache_check(cachefname): - """ - Check whether the code cache for a particular piece of code is valid. - - Returns a tuple containing: a boolean representing whether the cached code - should be used, and the cached code (or ``None`` if the cache should not be - used). - """ - ccode = None - run_cached = False - if os.path.isfile(cachefname): - with open(cachefname, "rb") as cfile: - if not _check_cache_versions(cfile): - return False, None - ccode = marshal.load(cfile) - run_cached = True - return run_cached, ccode - - -def run_code_with_cache(code, execer, glb=None, loc=None, mode="exec"): - """ - Run a piece of code, using a cached version if it exists, and updating the - cache as necessary. 
- """ - use_cache = should_use_cache(execer, mode) - filename = code_cache_name(code) - cachefname = get_cache_filename(filename, code=True) - run_cached = False - if use_cache: - run_cached, ccode = code_cache_check(cachefname) - if not run_cached: - ccode = compile_code(filename, code, execer, glb, loc, mode) - update_cache(ccode, cachefname) - run_compiled_code(ccode, glb, loc, mode) diff --git a/xonsh/color_tools.py b/xonsh/color_tools.py deleted file mode 100644 index 1a60584..0000000 --- a/xonsh/color_tools.py +++ /dev/null @@ -1,419 +0,0 @@ -"""Tools for color handling in xonsh. - -This includes Convert values between RGB hex codes and xterm-256 -color codes. Parts of this file were originally forked from Micah Elliott -http://MicahElliott.com Copyright (C) 2011 Micah Elliott. All rights reserved. -WTFPL http://sam.zoy.org/wtfpl/ -""" -import re -import math - -from xonsh.lazyasd import lazyobject, LazyObject - - -RE_BACKGROUND = LazyObject( - lambda: re.compile("(BG#|BGHEX|BACKGROUND)"), globals(), "RE_BACKGROUND" -) - - -@lazyobject -def BASE_XONSH_COLORS(): - return { - "BLACK": (0, 0, 0), - "RED": (170, 0, 0), - "GREEN": (0, 170, 0), - "YELLOW": (170, 85, 0), - "BLUE": (0, 0, 170), - "PURPLE": (170, 0, 170), - "CYAN": (0, 170, 170), - "WHITE": (170, 170, 170), - "INTENSE_BLACK": (85, 85, 85), - "INTENSE_RED": (255, 85, 85), - "INTENSE_GREEN": (85, 255, 85), - "INTENSE_YELLOW": (255, 255, 85), - "INTENSE_BLUE": (85, 85, 255), - "INTENSE_PURPLE": (255, 85, 255), - "INTENSE_CYAN": (85, 255, 255), - "INTENSE_WHITE": (255, 255, 255), - } - - -@lazyobject -def CLUT(): - """color look-up table""" - return [ - # 8-bit, RGB hex - # Primary 3-bit (8 colors). Unique representation! - ("00", "000000"), - ("01", "800000"), - ("02", "008000"), - ("03", "808000"), - ("04", "000080"), - ("05", "800080"), - ("06", "008080"), - ("07", "c0c0c0"), - # Equivalent "bright" versions of original 8 colors. - ("08", "808080"), - ("09", "ff0000"), - ("10", "00ff00"), - ("11", "ffff00"), - ("12", "0000ff"), - ("13", "ff00ff"), - ("14", "00ffff"), - ("15", "ffffff"), - # Strictly ascending. 
- ("16", "000000"), - ("17", "00005f"), - ("18", "000087"), - ("19", "0000af"), - ("20", "0000d7"), - ("21", "0000ff"), - ("22", "005f00"), - ("23", "005f5f"), - ("24", "005f87"), - ("25", "005faf"), - ("26", "005fd7"), - ("27", "005fff"), - ("28", "008700"), - ("29", "00875f"), - ("30", "008787"), - ("31", "0087af"), - ("32", "0087d7"), - ("33", "0087ff"), - ("34", "00af00"), - ("35", "00af5f"), - ("36", "00af87"), - ("37", "00afaf"), - ("38", "00afd7"), - ("39", "00afff"), - ("40", "00d700"), - ("41", "00d75f"), - ("42", "00d787"), - ("43", "00d7af"), - ("44", "00d7d7"), - ("45", "00d7ff"), - ("46", "00ff00"), - ("47", "00ff5f"), - ("48", "00ff87"), - ("49", "00ffaf"), - ("50", "00ffd7"), - ("51", "00ffff"), - ("52", "5f0000"), - ("53", "5f005f"), - ("54", "5f0087"), - ("55", "5f00af"), - ("56", "5f00d7"), - ("57", "5f00ff"), - ("58", "5f5f00"), - ("59", "5f5f5f"), - ("60", "5f5f87"), - ("61", "5f5faf"), - ("62", "5f5fd7"), - ("63", "5f5fff"), - ("64", "5f8700"), - ("65", "5f875f"), - ("66", "5f8787"), - ("67", "5f87af"), - ("68", "5f87d7"), - ("69", "5f87ff"), - ("70", "5faf00"), - ("71", "5faf5f"), - ("72", "5faf87"), - ("73", "5fafaf"), - ("74", "5fafd7"), - ("75", "5fafff"), - ("76", "5fd700"), - ("77", "5fd75f"), - ("78", "5fd787"), - ("79", "5fd7af"), - ("80", "5fd7d7"), - ("81", "5fd7ff"), - ("82", "5fff00"), - ("83", "5fff5f"), - ("84", "5fff87"), - ("85", "5fffaf"), - ("86", "5fffd7"), - ("87", "5fffff"), - ("88", "870000"), - ("89", "87005f"), - ("90", "870087"), - ("91", "8700af"), - ("92", "8700d7"), - ("93", "8700ff"), - ("94", "875f00"), - ("95", "875f5f"), - ("96", "875f87"), - ("97", "875faf"), - ("98", "875fd7"), - ("99", "875fff"), - ("100", "878700"), - ("101", "87875f"), - ("102", "878787"), - ("103", "8787af"), - ("104", "8787d7"), - ("105", "8787ff"), - ("106", "87af00"), - ("107", "87af5f"), - ("108", "87af87"), - ("109", "87afaf"), - ("110", "87afd7"), - ("111", "87afff"), - ("112", "87d700"), - ("113", "87d75f"), - ("114", "87d787"), - ("115", "87d7af"), - ("116", "87d7d7"), - ("117", "87d7ff"), - ("118", "87ff00"), - ("119", "87ff5f"), - ("120", "87ff87"), - ("121", "87ffaf"), - ("122", "87ffd7"), - ("123", "87ffff"), - ("124", "af0000"), - ("125", "af005f"), - ("126", "af0087"), - ("127", "af00af"), - ("128", "af00d7"), - ("129", "af00ff"), - ("130", "af5f00"), - ("131", "af5f5f"), - ("132", "af5f87"), - ("133", "af5faf"), - ("134", "af5fd7"), - ("135", "af5fff"), - ("136", "af8700"), - ("137", "af875f"), - ("138", "af8787"), - ("139", "af87af"), - ("140", "af87d7"), - ("141", "af87ff"), - ("142", "afaf00"), - ("143", "afaf5f"), - ("144", "afaf87"), - ("145", "afafaf"), - ("146", "afafd7"), - ("147", "afafff"), - ("148", "afd700"), - ("149", "afd75f"), - ("150", "afd787"), - ("151", "afd7af"), - ("152", "afd7d7"), - ("153", "afd7ff"), - ("154", "afff00"), - ("155", "afff5f"), - ("156", "afff87"), - ("157", "afffaf"), - ("158", "afffd7"), - ("159", "afffff"), - ("160", "d70000"), - ("161", "d7005f"), - ("162", "d70087"), - ("163", "d700af"), - ("164", "d700d7"), - ("165", "d700ff"), - ("166", "d75f00"), - ("167", "d75f5f"), - ("168", "d75f87"), - ("169", "d75faf"), - ("170", "d75fd7"), - ("171", "d75fff"), - ("172", "d78700"), - ("173", "d7875f"), - ("174", "d78787"), - ("175", "d787af"), - ("176", "d787d7"), - ("177", "d787ff"), - ("178", "d7af00"), - ("179", "d7af5f"), - ("180", "d7af87"), - ("181", "d7afaf"), - ("182", "d7afd7"), - ("183", "d7afff"), - ("184", "d7d700"), - ("185", "d7d75f"), - ("186", "d7d787"), - ("187", "d7d7af"), - ("188", "d7d7d7"), - 
("189", "d7d7ff"), - ("190", "d7ff00"), - ("191", "d7ff5f"), - ("192", "d7ff87"), - ("193", "d7ffaf"), - ("194", "d7ffd7"), - ("195", "d7ffff"), - ("196", "ff0000"), - ("197", "ff005f"), - ("198", "ff0087"), - ("199", "ff00af"), - ("200", "ff00d7"), - ("201", "ff00ff"), - ("202", "ff5f00"), - ("203", "ff5f5f"), - ("204", "ff5f87"), - ("205", "ff5faf"), - ("206", "ff5fd7"), - ("207", "ff5fff"), - ("208", "ff8700"), - ("209", "ff875f"), - ("210", "ff8787"), - ("211", "ff87af"), - ("212", "ff87d7"), - ("213", "ff87ff"), - ("214", "ffaf00"), - ("215", "ffaf5f"), - ("216", "ffaf87"), - ("217", "ffafaf"), - ("218", "ffafd7"), - ("219", "ffafff"), - ("220", "ffd700"), - ("221", "ffd75f"), - ("222", "ffd787"), - ("223", "ffd7af"), - ("224", "ffd7d7"), - ("225", "ffd7ff"), - ("226", "ffff00"), - ("227", "ffff5f"), - ("228", "ffff87"), - ("229", "ffffaf"), - ("230", "ffffd7"), - ("231", "ffffff"), - # Gray-scale range. - ("232", "080808"), - ("233", "121212"), - ("234", "1c1c1c"), - ("235", "262626"), - ("236", "303030"), - ("237", "3a3a3a"), - ("238", "444444"), - ("239", "4e4e4e"), - ("240", "585858"), - ("241", "626262"), - ("242", "6c6c6c"), - ("243", "767676"), - ("244", "808080"), - ("245", "8a8a8a"), - ("246", "949494"), - ("247", "9e9e9e"), - ("248", "a8a8a8"), - ("249", "b2b2b2"), - ("250", "bcbcbc"), - ("251", "c6c6c6"), - ("252", "d0d0d0"), - ("253", "dadada"), - ("254", "e4e4e4"), - ("255", "eeeeee"), - ] - - -def _str2hex(hexstr): - return int(hexstr, 16) - - -def _strip_hash(rgb): - # Strip leading `#` if exists. - if rgb.startswith("#"): - rgb = rgb.lstrip("#") - return rgb - - -@lazyobject -def SHORT_TO_RGB(): - return dict(CLUT) - - -@lazyobject -def RGB_TO_SHORT(): - return {v: k for k, v in SHORT_TO_RGB.items()} - - -def short2rgb(short): - return SHORT_TO_RGB[short] - - -def rgb_to_256(rgb): - """Find the closest ANSI 256 approximation to the given RGB value. - - >>> rgb2short('123456') - ('23', '005f5f') - >>> rgb2short('ffffff') - ('231', 'ffffff') - >>> rgb2short('0DADD6') # vimeo logo - ('38', '00afd7') - - Parameters - ---------- - rgb : Hex code representing an RGB value, eg, 'abcdef' - - Returns - ------- - String between 0 and 255, compatible with xterm. - """ - rgb = rgb.lstrip("#") - if len(rgb) == 0: - return "0", "000000" - incs = (0x00, 0x5F, 0x87, 0xAF, 0xD7, 0xFF) - # Break 6-char RGB code into 3 integer vals. 
-    parts = rgb_to_ints(rgb)
-    res = []
-    for part in parts:
-        i = 0
-        while i < len(incs) - 1:
-            s, b = incs[i], incs[i + 1]  # smaller, bigger
-            if s <= part <= b:
-                s1 = abs(s - part)
-                b1 = abs(b - part)
-                if s1 < b1:
-                    closest = s
-                else:
-                    closest = b
-                res.append(closest)
-                break
-            i += 1
-    res = "".join([("%02.x" % i) for i in res])
-    equiv = RGB_TO_SHORT[res]
-    return equiv, res
-
-
-rgb2short = rgb_to_256
-
-
-@lazyobject
-def RE_RGB3():
-    return re.compile(r"(.)(.)(.)")
-
-
-@lazyobject
-def RE_RGB6():
-    return re.compile(r"(..)(..)(..)")
-
-
-def rgb_to_ints(rgb):
-    if len(rgb) == 6:
-        return tuple([int(h, 16) for h in RE_RGB6.split(rgb)[1:4]])
-    else:
-        return tuple([int(h * 2, 16) for h in RE_RGB3.split(rgb)[1:4]])
-
-
-def short_to_ints(short):
-    """Converts a short (256) color to a 3-tuple of ints."""
-    return rgb_to_ints(short2rgb(short))
-
-
-def color_dist(x, y):
-    return math.sqrt((x[0] - y[0]) ** 2 + (x[1] - y[1]) ** 2 + (x[2] - y[2]) ** 2)
-
-
-def find_closest_color(x, palette):
-    return min(sorted(palette.keys())[::-1], key=lambda k: color_dist(x, palette[k]))
-
-
-def make_palette(strings):
-    """Makes a color palette from a collection of strings."""
-    palette = {}
-    for s in strings:
-        while "#" in s:
-            _, t = s.split("#", 1)
-            t, _, s = t.partition(" ")
-            palette[t] = rgb_to_ints(t)
-    return palette
diff --git a/xonsh/commands_cache.py b/xonsh/commands_cache.py
deleted file mode 100644
index 7d4a3f8..0000000
--- a/xonsh/commands_cache.py
+++ /dev/null
@@ -1,441 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Module for caching command & alias names as well as for predicting whether
-a command will be able to be run in the background.
-
-A background predictor is a function that accepts a single argument list
-and returns whether or not the process can be run in the background (returns
-True) or must be run in the foreground (returns False).
-"""
-import os
-import time
-import builtins
-import argparse
-import collections.abc as cabc
-
-from xonsh.platform import ON_WINDOWS, ON_POSIX, pathbasename
-from xonsh.tools import executables_in
-from xonsh.lazyasd import lazyobject
-
-
-class CommandsCache(cabc.Mapping):
-    """A lazy cache representing the commands available on the file system.
-    The keys are the command names and the values a tuple of (loc, has_alias)
-    where loc is either a str pointing to the executable on the file system or
-    None (if no executable exists) and has_alias is a boolean flag for whether
-    the command has an alias.
-    """
-
-    def __init__(self):
-        self._cmds_cache = {}
-        self._path_checksum = None
-        self._alias_checksum = None
-        self._path_mtime = -1
-        self.threadable_predictors = default_threadable_predictors()
-
-    def __contains__(self, key):
-        _ = self.all_commands
-        return self.lazyin(key)
-
-    def __iter__(self):
-        for cmd, (path, is_alias) in self.all_commands.items():
-            if ON_WINDOWS and path is not None:
-                # All command keys are stored in uppercase on Windows.
-                # This ensures the original command name is returned.
-                cmd = pathbasename(path)
-            yield cmd
-
-    def __len__(self):
-        return len(self.all_commands)
-
-    def __getitem__(self, key):
-        _ = self.all_commands
-        return self.lazyget(key)
-
-    def is_empty(self):
-        """Returns whether the cache is populated or not."""
-        return len(self._cmds_cache) == 0
-
-    @staticmethod
-    def get_possible_names(name):
-        """Generates the possible `PATHEXT` extension variants of a given executable
-        name on Windows as a list, conserving the ordering in `PATHEXT`.
-        On other platforms, returns a list with `name` as its only item."""
-        if ON_WINDOWS:
-            pathext = builtins.__xonsh__.env.get("PATHEXT", [])
-            name = name.upper()
-            return [name + ext for ext in ([""] + pathext)]
-        else:
-            return [name]
-
-    @staticmethod
-    def remove_dups(p):
-        ret = list()
-        for e in p:
-            if e not in ret:
-                ret.append(e)
-        return ret
-
-    @property
-    def all_commands(self):
-        paths = builtins.__xonsh__.env.get("PATH", [])
-        paths = CommandsCache.remove_dups(paths)
-        path_immut = tuple(x for x in paths if os.path.isdir(x))
-        # did PATH change?
-        path_hash = hash(path_immut)
-        cache_valid = path_hash == self._path_checksum
-        self._path_checksum = path_hash
-        # did aliases change?
-        alss = getattr(builtins, "aliases", dict())
-        al_hash = hash(frozenset(alss))
-        cache_valid = cache_valid and al_hash == self._alias_checksum
-        self._alias_checksum = al_hash
-        # did the contents of any directory in PATH change?
-        max_mtime = 0
-        for path in path_immut:
-            mtime = os.stat(path).st_mtime
-            if mtime > max_mtime:
-                max_mtime = mtime
-        cache_valid = cache_valid and (max_mtime <= self._path_mtime)
-        self._path_mtime = max_mtime
-        if cache_valid:
-            return self._cmds_cache
-        allcmds = {}
-        for path in reversed(path_immut):
-            # iterate backwards so that entries at the front of PATH overwrite
-            # entries at the back.
-            for cmd in executables_in(path):
-                key = cmd.upper() if ON_WINDOWS else cmd
-                allcmds[key] = (os.path.join(path, cmd), alss.get(key, None))
-        for cmd in alss:
-            if cmd not in allcmds:
-                key = cmd.upper() if ON_WINDOWS else cmd
-                allcmds[key] = (cmd, True)
-        self._cmds_cache = allcmds
-        return allcmds
-
-    def cached_name(self, name):
-        """Returns the name that would appear in the cache, if it exists."""
-        if name is None:
-            return None
-        cached = pathbasename(name)
-        if ON_WINDOWS:
-            keys = self.get_possible_names(cached)
-            cached = next((k for k in keys if k in self._cmds_cache), None)
-        return cached
-
-    def lazyin(self, key):
-        """Checks if the value is in the current cache without the potential to
-        update the cache. It just says whether the value is known *now*. This
-        may not reflect precisely what is on the $PATH.
-        """
-        return self.cached_name(key) in self._cmds_cache
-
-    def lazyiter(self):
-        """Returns an iterator over the current cache contents without the
-        potential to update the cache. This may not reflect what is on the
-        $PATH.
-        """
-        return iter(self._cmds_cache)
-
-    def lazylen(self):
-        """Returns the length of the current cache contents without the
-        potential to update the cache. This may not reflect precisely
-        what is on the $PATH.
-        """
-        return len(self._cmds_cache)
-
-    def lazyget(self, key, default=None):
-        """A lazy value getter."""
-        return self._cmds_cache.get(self.cached_name(key), default)
-
-    def locate_binary(self, name, ignore_alias=False):
-        """Locates an executable on the file system using the cache.
-
-        Arguments
-        ---------
-        name : str
-            name of binary to search for
-        ignore_alias : bool, optional
-            Force return of binary path even if alias of ``name`` exists
-            (default ``False``)
-        """
-        # make sure the cache is up to date by accessing the property
-        _ = self.all_commands
-        return self.lazy_locate_binary(name, ignore_alias)
-
-    def lazy_locate_binary(self, name, ignore_alias=False):
-        """Locates an executable in the cache, without checking its validity.
-
-        Arguments
-        ---------
-        name : str
-            name of binary to search for
-        ignore_alias : bool, optional
-            Force return of binary path even if alias of ``name`` exists
-            (default ``False``)
-        """
-        possibilities = self.get_possible_names(name)
-        if ON_WINDOWS:
-            # Windows users expect to be able to execute files in the same
-            # directory without `./`
-            local_bin = next((fn for fn in possibilities if os.path.isfile(fn)), None)
-            if local_bin:
-                return os.path.abspath(local_bin)
-        cached = next((cmd for cmd in possibilities if cmd in self._cmds_cache), None)
-        if cached:
-            (path, alias) = self._cmds_cache[cached]
-            ispure = path == pathbasename(path)
-            if alias and ignore_alias and ispure:
-                # pure alias, which we are ignoring
-                return None
-            else:
-                return path
-        elif os.path.isfile(name) and name != pathbasename(name):
-            return name
-
-    def is_only_functional_alias(self, name):
-        """Returns whether or not a command is only a functional alias, and has
-        no underlying executable. For example, the "cd" command is only available
-        as a functional alias.
-        """
-        _ = self.all_commands
-        return self.lazy_is_only_functional_alias(name)
-
-    def lazy_is_only_functional_alias(self, name):
-        """Returns whether or not a command is only a functional alias, and has
-        no underlying executable. For example, the "cd" command is only available
-        as a functional alias. This search is performed lazily.
-        """
-        val = self._cmds_cache.get(name, None)
-        if val is None:
-            return False
-        return (
-            val == (name, True) and self.locate_binary(name, ignore_alias=True) is None
-        )
-
-    def predict_threadable(self, cmd):
-        """Predicts whether a command list is able to be run on a background
-        thread, rather than the main thread.
-        """
-        name = self.cached_name(cmd[0])
-        predictors = self.threadable_predictors
-        if ON_WINDOWS:
-            # On Windows all names (keys) are stored in upper case so instead
-            # we get the original cmd or alias name
-            path, _ = self.lazyget(name, (None, None))
-            if path is None:
-                return True
-            else:
-                name = pathbasename(path)
-        if name not in predictors:
-            pre, ext = os.path.splitext(name)
-            if pre in predictors:
-                predictors[name] = predictors[pre]
-        if name not in predictors:
-            predictors[name] = self.default_predictor(name, cmd[0])
-        predictor = predictors[name]
-        return predictor(cmd[1:])
-
-    #
-    # Background Predictors (as methods)
-    #
-
-    def default_predictor(self, name, cmd0):
-        if ON_POSIX:
-            return self.default_predictor_readbin(
-                name, cmd0, timeout=0.1, failure=predict_true
-            )
-        else:
-            return predict_true
-
-    def default_predictor_readbin(self, name, cmd0, timeout, failure):
-        """Make a default predictor by analyzing the content of the binary.
-        Should only work on POSIX. Return failure if the analysis fails.
-        """
-        fname = cmd0 if os.path.isabs(cmd0) else None
-        fname = cmd0 if fname is None and os.sep in cmd0 else fname
-        fname = self.lazy_locate_binary(name) if fname is None else fname
-
-        if fname is None:
-            return failure
-        if not os.path.isfile(fname):
-            return failure
-
-        try:
-            fd = os.open(fname, os.O_RDONLY | os.O_NONBLOCK)
-        except Exception:
-            return failure  # opening error
-
-        search_for = {
-            (b"ncurses",): [False],
-            (b"libgpm",): [False],
-            (b"isatty", b"tcgetattr", b"tcsetattr"): [False, False, False],
-        }
-        tstart = time.time()
-        block = b""
-        while time.time() < tstart + timeout:
-            previous_block = block
-            try:
-                block = os.read(fd, 2048)
-            except Exception:
-                # should not occur, except e.g. if a file is deleted and a dir is
-                # created with the same name between os.path.isfile and os.open
-                os.close(fd)
-                return failure
-            if len(block) == 0:
-                os.close(fd)
-                return predict_true  # no keys of search_for found
-            analyzed_block = previous_block + block
-            for k, v in search_for.items():
-                for i in range(len(k)):
-                    if v[i]:
-                        continue
-                    if k[i] in analyzed_block:
-                        v[i] = True
-                if all(v):
-                    os.close(fd)
-                    return predict_false  # use one key of search_for
-        os.close(fd)
-        return failure  # timeout
-
-
-#
-# Background Predictors
-#
-
-
-def predict_true(args):
-    """Always say the process is threadable."""
-    return True
-
-
-def predict_false(args):
-    """Never say the process is threadable."""
-    return False
-
-
-@lazyobject
-def SHELL_PREDICTOR_PARSER():
-    p = argparse.ArgumentParser("shell", add_help=False)
-    p.add_argument("-c", nargs="?", default=None)
-    p.add_argument("filename", nargs="?", default=None)
-    return p
-
-
-def predict_shell(args):
-    """Predict the backgroundability of the normal shell interface, which
-    comes down to whether it is being run in subproc mode.
-    """
-    ns, _ = SHELL_PREDICTOR_PARSER.parse_known_args(args)
-    if ns.c is None and ns.filename is None:
-        pred = False
-    else:
-        pred = True
-    return pred
-
-
-@lazyobject
-def HELP_VER_PREDICTOR_PARSER():
-    p = argparse.ArgumentParser("cmd", add_help=False)
-    p.add_argument("-h", "--help", dest="help", action="store_true", default=None)
-    p.add_argument(
-        "-v", "-V", "--version", dest="version", action="store_true", default=None
-    )
-    return p
-
-
-def predict_help_ver(args):
-    """Predict the backgroundability of commands that have help & version
-    switches: -h, --help, -v, -V, --version. If either of these options is
-    present, the command is assumed to print to stdout normally and is therefore
-    threadable. Otherwise, the command is assumed to not be threadable.
-    This is useful for commands, like top, that normally enter alternate mode
-    but may not in certain circumstances.
-    """
-    ns, _ = HELP_VER_PREDICTOR_PARSER.parse_known_args(args)
-    pred = ns.help is not None or ns.version is not None
-    return pred
-
-
-@lazyobject
-def HG_PREDICTOR_PARSER():
-    p = argparse.ArgumentParser("hg", add_help=False)
-    p.add_argument("command")
-    p.add_argument(
-        "-i", "--interactive", action="store_true", default=False, dest="interactive"
-    )
-    return p
-
-
-def predict_hg(args):
-    """Predict if mercurial is about to be run in interactive mode.
-    If it is interactive, predict False. If it isn't, predict True.
-    Also predict False for certain commands, such as split.
-    """
-    ns, _ = HG_PREDICTOR_PARSER.parse_known_args(args)
-    if ns.command == "split":
-        return False
-    else:
-        return not ns.interactive
-
-
-def default_threadable_predictors():
-    """Generates a new defaultdict for known threadable predictors.
-    The default is to predict true.
-    """
-    # alphabetical, for what it is worth.
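-    # Each value is a predictor: predict_shell, predict_help_ver, and
-    # predict_hg inspect the argument list, while predict_true and
-    # predict_false ignore it. Commands missing from this table are retried
-    # with their file extension stripped, then fall back to default_predictor.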
- predictors = { - "aurman": predict_false, - "bash": predict_shell, - "csh": predict_shell, - "clear": predict_false, - "cls": predict_false, - "cmd": predict_shell, - "cryptop": predict_false, - "curl": predict_true, - "ex": predict_false, - "emacsclient": predict_false, - "fish": predict_shell, - "gvim": predict_help_ver, - "hg": predict_hg, - "htop": predict_help_ver, - "ipython": predict_shell, - "ksh": predict_shell, - "less": predict_help_ver, - "ls": predict_true, - "man": predict_help_ver, - "more": predict_help_ver, - "mvim": predict_help_ver, - "mutt": predict_help_ver, - "nano": predict_help_ver, - "nvim": predict_false, - "ponysay": predict_help_ver, - "psql": predict_false, - "python": predict_shell, - "python2": predict_shell, - "python3": predict_shell, - "repo": predict_help_ver, - "ranger": predict_help_ver, - "rview": predict_false, - "rvim": predict_false, - "scp": predict_false, - "sh": predict_shell, - "ssh": predict_false, - "startx": predict_false, - "sudo": predict_help_ver, - "tcsh": predict_shell, - "telnet": predict_false, - "top": predict_help_ver, - "vi": predict_false, - "view": predict_false, - "vim": predict_false, - "vimpager": predict_help_ver, - "weechat": predict_help_ver, - "xclip": predict_help_ver, - "xo": predict_help_ver, - "xonsh": predict_shell, - "xon.sh": predict_shell, - "zsh": predict_shell, - } - return predictors diff --git a/xonsh/completer.py b/xonsh/completer.py index 7d03515..ef0be4f 100644 --- a/xonsh/completer.py +++ b/xonsh/completer.py @@ -1,14 +1,100 @@ -# -*- coding: utf-8 -*- """A (tab-)completer for xonsh.""" +import os +import re import builtins -import collections.abc as cabc +import pickle +import shlex +import subprocess +import sys + +from xonsh.built_ins import iglobpath +from xonsh.tools import subexpr_from_unbalanced +from xonsh.tools import ON_WINDOWS + + +RE_DASHF = re.compile(r'-F\s+(\w+)') +RE_ATTR = re.compile(r'(\S+(\..+)*)\.(\w*)$') +RE_WIN_DRIVE = re.compile(r'^([a-zA-Z]):\\') + +XONSH_TOKENS = { + 'and ', 'as ', 'assert ', 'break', 'class ', 'continue', 'def ', 'del ', + 'elif ', 'else', 'except ', 'finally:', 'for ', 'from ', 'global ', + 'import ', 'if ', 'in ', 'is ', 'lambda ', 'nonlocal ', 'not ', 'or ', + 'pass', 'raise ', 'return ', 'try:', 'while ', 'with ', 'yield ', '+', '-', + '/', '//', '%', '**', '|', '&', '~', '^', '>>', '<<', '<', '<=', '>', '>=', + '==', '!=', '->', '=', '+=', '-=', '*=', '/=', '%=', '**=', '>>=', '<<=', + '&=', '^=', '|=', '//=', ',', ';', ':', '?', '??', '$(', '${', '$[', '..', + '...' +} + +BASH_COMPLETE_SCRIPT = """source {filename} +COMP_WORDS=({line}) +COMP_LINE={comp_line} +COMP_POINT=${{#COMP_LINE}} +COMP_COUNT={end} +COMP_CWORD={n} +{func} {cmd} {prefix} {prev} +for ((i=0;i<${{#COMPREPLY[*]}};i++)) do echo ${{COMPREPLY[i]}}; done +""" + +def startswithlow(x, start, startlow=None): + """True if x starts with a string or its lowercase version. The lowercase + version may be optionally be provided. + """ + if startlow is None: + startlow = start.lower() + return x.startswith(start) or x.lower().startswith(startlow) + + +def startswithnorm(x, start, startlow=None): + """True if x starts with a string s. Ignores its lowercase version, but + matches the API of startswithlow(). + """ + return x.startswith(start) + + +def _normpath(p): + """ Wraps os.normpath() to avoid removing './' at the beginning + and '/' at the end. 
On windows it does the same with backslashes
+    """
+    initial_dotslash = p.startswith(os.curdir + os.sep)
+    initial_dotslash |= (ON_WINDOWS and p.startswith(os.curdir + os.altsep))
+    p = p.rstrip()
+    trailing_slash = p.endswith(os.sep)
+    trailing_slash |= (ON_WINDOWS and p.endswith(os.altsep))
+    p = os.path.normpath(p)
+    if initial_dotslash and p != '.':
+        p = os.path.join(os.curdir, p)
+    if trailing_slash:
+        p = os.path.join(p, '')
+
+    if ON_WINDOWS and builtins.__xonsh_env__.get('FORCE_POSIX_PATHS'):
+        p = p.replace(os.sep, os.altsep)
+
+    return p
 
 
 class Completer(object):
     """This provides a list of optional completions for the xonsh shell."""
 
+    def __init__(self):
+        # initialize command cache
+        self._path_checksum = None
+        self._alias_checksum = None
+        self._path_mtime = -1
+        self._cmds_cache = frozenset()
+        self._man_completer = ManCompleter()
+        try:
+            # FIXME this could be threaded for faster startup times
+            self._load_bash_complete_funcs()
+            # or we could make this lazy
+            self._load_bash_complete_files()
+            self.have_bash = True
+        except (subprocess.CalledProcessError, FileNotFoundError):
+            self.have_bash = False
+
     def complete(self, prefix, line, begidx, endidx, ctx=None):
-        """Complete the string, given a possible execution context.
+        """Complete the string s, given a possible execution context.
 
         Parameters
         ----------
@@ -27,24 +113,326 @@ def complete(self, prefix, line, begidx, endidx, ctx=None):
         -------
         rtn : list of str
             Possible completions of prefix, sorted alphabetically.
-        lprefix : int
-            Length of the prefix to be replaced in the completion.
         """
+        space = ' '  # intern some strings for faster appending
+        slash = '/'
+        dot = '.'
         ctx = ctx or {}
-        for func in builtins.__xonsh__.completers.values():
-            try:
-                out = func(prefix, line, begidx, endidx, ctx)
-            except StopIteration:
-                return set(), len(prefix)
-            if isinstance(out, cabc.Sequence):
-                res, lprefix = out
+        prefixlow = prefix.lower()
+        cmd = line.split(' ', 1)[0]
+        csc = builtins.__xonsh_env__.get('CASE_SENSITIVE_COMPLETIONS')
+        startswither = startswithnorm if csc else startswithlow
+        if begidx == 0:
+            # the first thing we're typing; could be python or subprocess, so
+            # anything goes.
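+            # complete against command names gathered from $PATH and aliases,
+            # e.g. a prefix of "gi" would offer candidates such as "git ".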
+ rtn = self.cmd_complete(prefix) + elif cmd in self.bash_complete_funcs: + rtn = set() + for s in self.bash_complete(prefix, line, begidx, endidx): + if os.path.isdir(s.rstrip()): + s = s.rstrip() + slash + rtn.add(s) + if len(rtn) == 0: + rtn = self.path_complete(prefix) + return sorted(rtn) + elif prefix.startswith('${') or prefix.startswith('@('): + # python mode explicitly + rtn = set() + elif prefix.startswith('-'): + return sorted(self._man_completer.option_complete(prefix, cmd)) + elif cmd not in ctx: + if cmd == 'import' and begidx == len('import '): + # completing module to import + return sorted(self.module_complete(prefix)) + if cmd in self._all_commands(): + # subproc mode; do path completions + return sorted(self.path_complete(prefix, cdpath=True)) + else: + # if we're here, could be anything + rtn = set() + else: + # if we're here, we're not a command, but could be anything else + rtn = set() + rtn |= {s for s in XONSH_TOKENS if startswither(s, prefix, prefixlow)} + if ctx is not None: + if dot in prefix: + rtn |= self.attr_complete(prefix, ctx) else: - res = out - lprefix = len(prefix) - if res is not None and len(res) != 0: + rtn |= {s for s in ctx if startswither(s, prefix, prefixlow)} + rtn |= {s for s in dir(builtins) if startswither(s, prefix, prefixlow)} + rtn |= {s + space for s in builtins.aliases + if startswither(s, prefix, prefixlow)} + rtn |= self.path_complete(prefix) + return sorted(rtn) + + def _add_env(self, paths, prefix): + if prefix.startswith('$'): + csc = builtins.__xonsh_env__.get('CASE_SENSITIVE_COMPLETIONS') + startswither = startswithnorm if csc else startswithlow + key = prefix[1:] + keylow = key.lower() + paths.update({'$' + k for k in builtins.__xonsh_env__ if startswither(k, key, keylow)}) + + def _add_dots(self, paths, prefix): + if prefix in {'', '.'}: + paths.update({'./', '../'}) + if prefix == '..': + paths.add('../') + + def _add_cdpaths(self, paths, prefix): + """Completes current prefix using CDPATH""" + env = builtins.__xonsh_env__ + csc = env.get('CASE_SENSITIVE_COMPLETIONS') + for cdp in env.get('CDPATH'): + test_glob = os.path.join(cdp, prefix) + '*' + for s in iglobpath(test_glob, ignore_case=(not csc)): + if os.path.isdir(s): + paths.add(os.path.basename(s)) + + def cmd_complete(self, cmd): + """Completes a command name based on what is on the $PATH""" + space = ' ' + cmdlow = cmd.lower() + csc = builtins.__xonsh_env__.get('CASE_SENSITIVE_COMPLETIONS') + startswither = startswithnorm if csc else startswithlow + return {s + space + for s in self._all_commands() + if startswither(s, cmd, cmdlow)} + + def module_complete(self, prefix): + """Completes a name of a module to import.""" + prefixlow = prefix.lower() + modules = set(sys.modules.keys()) + csc = builtins.__xonsh_env__.get('CASE_SENSITIVE_COMPLETIONS') + startswither = startswithnorm if csc else startswithlow + return {s for s in modules if startswither(s, prefix, prefixlow)} + + def path_complete(self, prefix, cdpath=False): + """Completes based on a path name.""" + space = ' ' # intern some strings for faster appending + slash = '/' + tilde = '~' + paths = set() + csc = builtins.__xonsh_env__.get('CASE_SENSITIVE_COMPLETIONS') + if prefix.startswith("'") or prefix.startswith('"'): + prefix = prefix[1:] + for s in iglobpath(prefix + '*', ignore_case=(not csc)): + if space in s: + s = repr(s + (slash if os.path.isdir(s) else '')) + else: + s = s + (slash if os.path.isdir(s) else space) + paths.add(s) + if tilde in prefix: + home = os.path.expanduser(tilde) + paths = 
{s.replace(home, tilde) for s in paths} + self._add_env(paths, prefix) + self._add_dots(paths, prefix) + if cdpath: + self._add_cdpaths(paths, prefix) + return {_normpath(s) for s in paths} + + def bash_complete(self, prefix, line, begidx, endidx): + """Attempts BASH completion.""" + splt = line.split() + cmd = splt[0] + func = self.bash_complete_funcs.get(cmd, None) + fnme = self.bash_complete_files.get(cmd, None) + if func is None or fnme is None: + return set() + idx = n = 0 + for n, tok in enumerate(splt): + if tok == prefix: + idx = line.find(prefix, idx) + if idx >= begidx: + break + prev = tok + if len(prefix) == 0: + prefix = '""' + n += 1 + else: + prefix = shlex.quote(prefix) + + script = BASH_COMPLETE_SCRIPT.format(filename=fnme, + line=' '.join(shlex.quote(p) for p in splt), + comp_line=shlex.quote(line), + n=n, + func=func, + cmd=cmd, + end=endidx + 1, + prefix=prefix, + prev=shlex.quote(prev)) + try: + out = subprocess.check_output(['bash'], + input=script, + universal_newlines=True, + stderr=subprocess.PIPE) + except subprocess.CalledProcessError: + out = '' + + space = ' ' + rtn = {s + space if s[-1:].isalnum() else s for s in out.splitlines()} + return rtn + + def _source_completions(self): + srcs = [] + for f in builtins.__xonsh_env__.get('BASH_COMPLETIONS'): + if os.path.isfile(f): + # We need to "Unixify" Windows paths for Bash to understand + if ON_WINDOWS: + f = RE_WIN_DRIVE.sub(lambda m: '/{0}/'.format(m.group(1).lower()), f).replace('\\', '/') + srcs.append('source ' + f) + return srcs + + def _load_bash_complete_funcs(self): + self.bash_complete_funcs = bcf = {} + inp = self._source_completions() + if len(inp) == 0: + return + inp.append('complete -p\n') + out = subprocess.check_output(['bash'], input='\n'.join(inp), + universal_newlines=True) + for line in out.splitlines(): + head, cmd = line.rsplit(' ', 1) + if len(cmd) == 0 or cmd == 'cd': + continue + m = RE_DASHF.search(head) + if m is None: + continue + bcf[cmd] = m.group(1) + + def _load_bash_complete_files(self): + inp = self._source_completions() + if len(inp) == 0: + self.bash_complete_files = {} + return + if self.bash_complete_funcs: + inp.append('shopt -s extdebug') + bash_funcs = set(self.bash_complete_funcs.values()) + inp.append('declare -F ' + ' '.join([f for f in bash_funcs])) + inp.append('shopt -u extdebug\n') + out = subprocess.check_output(['bash'], input='\n'.join(inp), + universal_newlines=True) + func_files = {} + for line in out.splitlines(): + parts = line.split() + func_files[parts[0]] = parts[-1] + self.bash_complete_files = { + cmd: func_files[func] + for cmd, func in self.bash_complete_funcs.items() + if func in func_files + } + + def attr_complete(self, prefix, ctx): + """Complete attributes of an object.""" + attrs = set() + m = RE_ATTR.match(prefix) + if m is None: + return attrs + expr, attr = m.group(1, 3) + expr = subexpr_from_unbalanced(expr, '(', ')') + expr = subexpr_from_unbalanced(expr, '[', ']') + expr = subexpr_from_unbalanced(expr, '{', '}') + try: + val = builtins.evalx(expr, glbs=ctx) + except: # pylint:disable=bare-except + try: + val = builtins.evalx(expr, glbs=builtins.__dict__) + except: # pylint:disable=bare-except + return attrs # anything could have gone wrong! 
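+        # the expression evaluated cleanly; list its attributes and, when no
+        # attribute prefix was typed, hide names starting with an underscore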
+        opts = dir(val)
+        if len(attr) == 0:
+            opts = [o for o in opts if not o.startswith('_')]
+        else:
+            csc = builtins.__xonsh_env__.get('CASE_SENSITIVE_COMPLETIONS')
+            startswither = startswithnorm if csc else startswithlow
+            attrlow = attr.lower()
+            opts = [o for o in opts if startswither(o, attr, attrlow)]
+        prelen = len(prefix)
+        for opt in opts:
+            a = getattr(val, opt)
+            rpl = opt + '(' if callable(a) else opt
+            # note that prefix[:prelen-len(attr)] != prefix[:-len(attr)]
+            # when len(attr) == 0.
+            comp = prefix[:prelen - len(attr)] + rpl
+            attrs.add(comp)
+        return attrs
+
+    def _all_commands(self):
+        path = builtins.__xonsh_env__.get('PATH', [])
+        # did PATH change?
+        path_hash = hash(tuple(path))
+        cache_valid = path_hash == self._path_checksum
+        self._path_checksum = path_hash
+        # did aliases change?
+        al_hash = hash(tuple(sorted(builtins.aliases.keys())))
+        self._alias_checksum = al_hash
+        cache_valid = cache_valid and al_hash == self._alias_checksum
+        pm = self._path_mtime
+        # did the contents of any directory in PATH change?
+        for d in filter(os.path.isdir, path):
+            m = os.stat(d).st_mtime
+            if m > pm:
+                pm = m
+                cache_valid = False
+        self._path_mtime = pm
+        if cache_valid:
+            return self._cmds_cache
+        allcmds = set()
+        for d in filter(os.path.isdir, path):
+            allcmds |= set(os.listdir(d))
+        allcmds |= set(builtins.aliases.keys())
+        self._cmds_cache = frozenset(allcmds)
+        return self._cmds_cache
+
+
+OPTIONS_PATH = os.path.expanduser('~') + "/.xonsh_man_completions"
+SCRAPE_RE = re.compile(r'^(?:\s*(?:-\w|--[a-z0-9-]+)[\s,])+', re.M)
+INNER_OPTIONS_RE = re.compile(r'-\w|--[a-z0-9-]+')
+
+
+class ManCompleter(object):
+    """Helper class that loads completions derived from man pages."""
+
+    def __init__(self):
+        self._load_cached_options()
+
+    def __del__(self):
+        try:
+            self._save_cached_options()
+        except Exception:
+            pass
+
+    def option_complete(self, prefix, cmd):
+        """Completes an option name, based on the content of the man page."""
+        csc = builtins.__xonsh_env__.get('CASE_SENSITIVE_COMPLETIONS')
+        startswither = startswithnorm if csc else startswithlow
+        if cmd not in self._options.keys():
+            try:
+                manpage = subprocess.Popen(["man", cmd],
+                                           stdout=subprocess.PIPE,
+                                           stderr=subprocess.DEVNULL)
+                # This is a trick to get rid of reverse line feeds
+                text = subprocess.check_output(["col", "-b"],
+                                               stdin=manpage.stdout)
+                text = text.decode('utf-8')
+                scraped_text = ' '.join(SCRAPE_RE.findall(text))
+                matches = INNER_OPTIONS_RE.findall(scraped_text)
+                self._options[cmd] = matches
+            except:
+                return set()
+        prefixlow = prefix.lower()
+        return {s for s in self._options[cmd]
+                if startswither(s, prefix, prefixlow)}
-        def sortkey(s):
-            return s.lstrip(''''"''').lower()
+    def _load_cached_options(self):
+        """Load options from file at startup."""
+        try:
+            with open(OPTIONS_PATH, 'rb') as f:
+                self._options = pickle.load(f)
+        except:
+            self._options = {}
-        return tuple(sorted(res, key=sortkey)), lprefix
-    return set(), lprefix
+    def _save_cached_options(self):
+        """Save completions to file."""
+        with open(OPTIONS_PATH, 'wb') as f:
+            pickle.dump(self._options, f)
diff --git a/xonsh/completers/__init__.py b/xonsh/completers/__init__.py
deleted file mode 100644
index 17eded8..0000000
--- a/xonsh/completers/__init__.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# amalgamate exclude
-import os as _os
-
-if _os.getenv("XONSH_DEBUG", ""):
-    pass
-else:
-    import sys as _sys
-
-    try:
-        from xonsh.completers import __amalgam__
-
-        bash_completion = __amalgam__
-        _sys.modules["xonsh.completers.bash_completion"] =
__amalgam__ - completer = __amalgam__ - _sys.modules["xonsh.completers.completer"] = __amalgam__ - pip = __amalgam__ - _sys.modules["xonsh.completers.pip"] = __amalgam__ - tools = __amalgam__ - _sys.modules["xonsh.completers.tools"] = __amalgam__ - xompletions = __amalgam__ - _sys.modules["xonsh.completers.xompletions"] = __amalgam__ - _aliases = __amalgam__ - _sys.modules["xonsh.completers._aliases"] = __amalgam__ - commands = __amalgam__ - _sys.modules["xonsh.completers.commands"] = __amalgam__ - man = __amalgam__ - _sys.modules["xonsh.completers.man"] = __amalgam__ - path = __amalgam__ - _sys.modules["xonsh.completers.path"] = __amalgam__ - python = __amalgam__ - _sys.modules["xonsh.completers.python"] = __amalgam__ - base = __amalgam__ - _sys.modules["xonsh.completers.base"] = __amalgam__ - bash = __amalgam__ - _sys.modules["xonsh.completers.bash"] = __amalgam__ - dirs = __amalgam__ - _sys.modules["xonsh.completers.dirs"] = __amalgam__ - init = __amalgam__ - _sys.modules["xonsh.completers.init"] = __amalgam__ - del __amalgam__ - except ImportError: - pass - del _sys -del _os -# amalgamate end diff --git a/xonsh/completers/_aliases.py b/xonsh/completers/_aliases.py deleted file mode 100644 index adaed7f..0000000 --- a/xonsh/completers/_aliases.py +++ /dev/null @@ -1,190 +0,0 @@ -import builtins -import collections - -import xonsh.lazyasd as xl - -from xonsh.completers.tools import justify - - -VALID_ACTIONS = xl.LazyObject( - lambda: frozenset({"add", "remove", "list"}), globals(), "VALID_ACTIONS" -) - - -def _add_one_completer(name, func, loc="end"): - new = collections.OrderedDict() - if loc == "start": - new[name] = func - for (k, v) in builtins.__xonsh__.completers.items(): - new[k] = v - elif loc == "end": - for (k, v) in builtins.__xonsh__.completers.items(): - new[k] = v - new[name] = func - else: - direction, rel = loc[0], loc[1:] - found = False - for (k, v) in builtins.__xonsh__.completers.items(): - if rel == k and direction == "<": - new[name] = func - found = True - new[k] = v - if rel == k and direction == ">": - new[name] = func - found = True - if not found: - new[name] = func - builtins.__xonsh__.completers.clear() - builtins.__xonsh__.completers.update(new) - - -def _list_completers(args, stdin=None, stack=None): - o = "Registered Completer Functions: \n" - _comp = builtins.__xonsh__.completers - ml = max((len(i) for i in _comp), default=0) - _strs = [] - for c in _comp: - if _comp[c].__doc__ is None: - doc = "No description provided" - else: - doc = " ".join(_comp[c].__doc__.split()) - doc = justify(doc, 80, ml + 3) - _strs.append("{: >{}} : {}".format(c, ml, doc)) - return o + "\n".join(_strs) + "\n" - - -def _remove_completer(args, stdin=None, stack=None): - err = None - if len(args) != 1: - err = "completer remove takes exactly 1 argument." 
- else: - name = args[0] - if name not in builtins.__xonsh__.completers: - err = ("The name %s is not a registered " "completer function.") % name - if err is None: - del builtins.__xonsh__.completers[name] - return - else: - return None, err + "\n", 1 - - -def _register_completer(args, stdin=None, stack=None): - err = None - if len(args) not in {2, 3}: - err = ( - "completer add takes either 2 or 3 arguments.\n" - "For help, run: completer help add" - ) - else: - name = args[0] - func_name = args[1] - if name in builtins.__xonsh__.completers: - err = ("The name %s is already a registered " "completer function.") % name - else: - if func_name in builtins.__xonsh__.ctx: - func = builtins.__xonsh__.ctx[func_name] - if not callable(func): - err = "%s is not callable" % func_name - else: - for frame_info in stack: - frame = frame_info[0] - if func_name in frame.f_locals: - func = frame.f_locals[func_name] - break - elif func_name in frame.f_globals: - func = frame.f_globals[func_name] - break - else: - err = "No such function: %s" % func_name - if err is None: - position = "start" if len(args) == 2 else args[2] - _add_one_completer(name, func, position) - else: - return None, err + "\n", 1 - - -def completer_alias(args, stdin=None, stdout=None, stderr=None, spec=None, stack=None): - err = None - if len(args) == 0 or args[0] not in (VALID_ACTIONS | {"help"}): - err = ( - "Please specify an action. Valid actions are: " - '"add", "remove", "list", or "help".' - ) - elif args[0] == "help": - if len(args) == 1 or args[1] not in VALID_ACTIONS: - return ( - "Valid actions are: add, remove, list. For help with a " - "specific action, run: completer help ACTION\n" - ) - elif args[1] == "add": - return COMPLETER_ADD_HELP_STR - elif args[1] == "remove": - return COMPLETER_REMOVE_HELP_STR - elif args[1] == "list": - return COMPLETER_LIST_HELP_STR - - if err is not None: - return None, err + "\n", 1 - - if args[0] == "add": - func = _register_completer - elif args[0] == "remove": - func = _remove_completer - elif args[0] == "list": - func = _list_completers - return func(args[1:], stdin=stdin, stack=stack) - - -COMPLETER_LIST_HELP_STR = """completer list: ordered list the active completers - -Usage: - completer remove -""" - -COMPLETER_REMOVE_HELP_STR = """completer remove: removes a completer from xonsh - -Usage: - completer remove NAME - -NAME is a unique name of a completer (run "completer list" to see the current - completers in order) -""" - -COMPLETER_ADD_HELP_STR = """completer add: adds a new completer to xonsh - -Usage: - completer add NAME FUNC [POS] - -NAME is a unique name to use in the listing (run "completer list" to see the - current completers in order) - -FUNC is the name of a completer function to use. This should be a function - of the following arguments, and should return a set of valid completions - for the given prefix. If this completer should not be used in a given - context, it should return an empty set or None. - - Arguments to FUNC: - * prefix: the string to be matched - * line: a string representing the whole current line, for context - * begidx: the index at which prefix starts in line - * endidx: the index at which prefix ends in line - * ctx: the current Python environment - - If the completer expands the prefix in any way, it should return a tuple - of two elements: the first should be the set of completions, and the - second should be the length of the modified prefix (for an example, see - xonsh.completers.path.complete_path). 
- -POS (optional) is a position into the list of completers at which the new - completer should be added. It can be one of the following values: - * "start" indicates that the completer should be added to the start of - the list of completers (it should be run before all others) - * "end" indicates that the completer should be added to the end of the - list of completers (it should be run after all others) - * ">KEY", where KEY is a pre-existing name, indicates that this should - be added after the completer named KEY - * " /dev/null || echo "-F _minimal" -}} - -_complete_stmt=$(_get_complete_statement) -if echo "$_complete_stmt" | grep --quiet -e "_minimal" -then - declare -f _completion_loader > /dev/null && _completion_loader {cmd} - _complete_stmt=$(_get_complete_statement) -fi - -_func=$(echo "$_complete_stmt" | grep -o -e '-F \w\+' | cut -d ' ' -f 2) -declare -f "$_func" > /dev/null || exit 1 - -echo "$_complete_stmt" -COMP_WORDS=({line}) -COMP_LINE={comp_line} -COMP_POINT=${{#COMP_LINE}} -COMP_COUNT={end} -COMP_CWORD={n} -$_func {cmd} {prefix} {prev} - -# print out completions, right-stripped if they contain no internal spaces -shopt -s extglob -for ((i=0;i<${{#COMPREPLY[*]}};i++)) -do - no_spaces="${{COMPREPLY[i]//[[:space:]]}}" - no_trailing_spaces="${{COMPREPLY[i]%%+([[:space:]])}}" - if [[ "$no_spaces" == "$no_trailing_spaces" ]]; then - echo "$no_trailing_spaces" - else - echo "${{COMPREPLY[i]}}" - fi -done -""" - - -def bash_completions( - prefix, - line, - begidx, - endidx, - env=None, - paths=None, - command=None, - quote_paths=_bash_quote_paths, - **kwargs -): - """Completes based on results from BASH completion. - - Parameters - ---------- - prefix : str - The string to match - line : str - The line that prefix appears on. - begidx : int - The index in line that prefix starts on. - endidx : int - The index in line that prefix ends on. - env : Mapping, optional - The environment dict to execute the Bash subprocess in. - paths : list or tuple of str or None, optional - This is a list (or tuple) of strings that specifies where the - ``bash_completion`` script may be found. The first valid path will - be used. For better performance, bash-completion v2.x is recommended - since it lazy-loads individual completion scripts. For both - bash-completion v1.x and v2.x, paths of individual completion scripts - (like ``.../completes/ssh``) do not need to be included here. The - default values are platform dependent, but sane. - command : str or None, optional - The /path/to/bash to use. If None, it will be selected based on the - from the environment and platform. - quote_paths : callable, optional - A functions that quotes file system paths. You shouldn't normally need - this as the default is acceptable 99+% of the time. This function should - return a set of the new paths and a boolean for whether the paths were - quoted. - - Returns - ------- - rtn : set of str - Possible completions of prefix - lprefix : int - Length of the prefix to be replaced in the completion. 
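# A usage sketch for bash_completions() as documented above; the line, the
# indices, and the results are illustrative and depend on the local bash
# setup. no_quote_paths() is a made-up callable honoring the quote_paths
# contract described in the Parameters section.
def no_quote_paths(paths, start, end):
    # return the paths untouched plus a flag saying nothing was quoted
    return set(paths), False

out, lprefix = bash_completions(
    "chec", "git chec", begidx=4, endidx=8, quote_paths=no_quote_paths
)
# out might be {"checkout"} where git's bash completions are installed;
# lprefix reports how many characters of the typed prefix the results replace.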
- """ - source = _get_bash_completions_source(paths) or "" - - if prefix.startswith("$"): # do not complete env variables - return set(), 0 - - splt = line.split() - cmd = splt[0] - idx = n = 0 - prev = "" - for n, tok in enumerate(splt): - if tok == prefix: - idx = line.find(prefix, idx) - if idx >= begidx: - break - prev = tok - - if len(prefix) == 0: - prefix_quoted = '""' - n += 1 - else: - prefix_quoted = shlex.quote(prefix) - - script = BASH_COMPLETE_SCRIPT.format( - source=source, - line=" ".join(shlex.quote(p) for p in splt), - comp_line=shlex.quote(line), - n=n, - cmd=shlex.quote(cmd), - end=endidx + 1, - prefix=prefix_quoted, - prev=shlex.quote(prev), - ) - - if command is None: - command = _bash_command(env=env) - try: - out = subprocess.check_output( - [command, "-c", script], - universal_newlines=True, - stderr=subprocess.PIPE, - env=env, - ) - if not out: - raise ValueError - except ( - subprocess.CalledProcessError, - FileNotFoundError, - UnicodeDecodeError, - ValueError, - ): - return set(), 0 - - out = out.splitlines() - complete_stmt = out[0] - out = set(out[1:]) - - # From GNU Bash document: The results of the expansion are prefix-matched - # against the word being completed - - # Ensure input to `commonprefix` is a list (now required by Python 3.6) - commprefix = os.path.commonprefix(list(out)) - strip_len = 0 - strip_prefix = prefix.strip("\"'") - while strip_len < len(strip_prefix) and strip_len < len(commprefix): - if commprefix[strip_len] == strip_prefix[strip_len]: - break - strip_len += 1 - - if "-o noquote" not in complete_stmt: - out, need_quotes = quote_paths(out, "", "") - if "-o nospace" in complete_stmt: - out = set([x.rstrip() for x in out]) - - return out, max(len(prefix) - strip_len, 0) - - -def bash_complete_line(line, return_line=True, **kwargs): - """Provides the completion from the end of the line. - - Parameters - ---------- - line : str - Line to complete - return_line : bool, optional - If true (default), will return the entire line, with the completion added. - If false, this will instead return the strings to append to the original line. - kwargs : optional - All other keyword arguments are passed to the bash_completions() function. 
- - Returns - ------- - rtn : set of str - Possible completions of prefix - """ - # set up for completing from the end of the line - split = line.split() - if len(split) > 1 and not line.endswith(" "): - prefix = split[-1] - begidx = len(line.rsplit(prefix)[0]) - else: - prefix = "" - begidx = len(line) - endidx = len(line) - # get completions - out, lprefix = bash_completions(prefix, line, begidx, endidx, **kwargs) - # reformat output - if return_line: - preline = line[:-lprefix] - rtn = {preline + o for o in out} - else: - rtn = {o[lprefix:] for o in out} - return rtn - - -def _bc_main(args=None): - """Runs complete_line() and prints the output.""" - from argparse import ArgumentParser - - p = ArgumentParser("bash_completions") - p.add_argument( - "--return-line", - action="store_true", - dest="return_line", - default=True, - help="will return the entire line, with the completion added", - ) - p.add_argument( - "--no-return-line", - action="store_false", - dest="return_line", - help="will instead return the strings to append to the original line", - ) - p.add_argument("line", help="line to complete") - ns = p.parse_args(args=args) - out = bash_complete_line(ns.line, return_line=ns.return_line) - for o in sorted(out): - print(o) - - -if __name__ == "__main__": - _bc_main() diff --git a/xonsh/completers/commands.py b/xonsh/completers/commands.py deleted file mode 100644 index 1f6e2e1..0000000 --- a/xonsh/completers/commands.py +++ /dev/null @@ -1,64 +0,0 @@ -import os -import builtins - -import xonsh.tools as xt -import xonsh.platform as xp - -from xonsh.completers.tools import get_filter_function - -SKIP_TOKENS = {"sudo", "time", "timeit", "which", "showcmd", "man"} -END_PROC_TOKENS = {"|", "||", "&&", "and", "or"} - - -def complete_command(cmd, line, start, end, ctx): - """ - Returns a list of valid commands starting with the first argument - """ - space = " " - out = { - s + space - for s in builtins.__xonsh__.commands_cache - if get_filter_function()(s, cmd) - } - if xp.ON_WINDOWS: - out |= {i for i in xt.executables_in(".") if i.startswith(cmd)} - base = os.path.basename(cmd) - if os.path.isdir(base): - out |= { - os.path.join(base, i) for i in xt.executables_in(base) if i.startswith(cmd) - } - return out - - -def complete_skipper(cmd, line, start, end, ctx): - """ - Skip over several tokens (e.g., sudo) and complete based on the rest of the - line. 
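# A worked illustration of the skipping described above (the line is
# illustrative): leading SKIP_TOKENS are dropped and completion is then
# re-dispatched against the remainder of the line.
parts = "sudo apt ins".split(" ")
skip = 0
while skip < len(parts) and parts[skip] in SKIP_TOKENS:
    skip += 1
assert " ".join(parts[skip:]) == "apt ins"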
- """ - parts = line.split(" ") - skip_part_num = 0 - for i, s in enumerate(parts): - if s in END_PROC_TOKENS: - skip_part_num = i + 1 - while len(parts) > skip_part_num: - if parts[skip_part_num] not in SKIP_TOKENS: - break - skip_part_num += 1 - - if skip_part_num == 0: - return set() - - # If there's no space following an END_PROC_TOKEN, insert one - if parts[-1] in END_PROC_TOKENS: - return(set(" "), 0) - - if len(parts) == skip_part_num + 1: - comp_func = complete_command - else: - comp = builtins.__xonsh__.shell.shell.completer - comp_func = comp.complete - - skip_len = len(" ".join(line[:skip_part_num])) + 1 - return comp_func( - cmd, " ".join(parts[skip_part_num:]), start - skip_len, end - skip_len, ctx - ) diff --git a/xonsh/completers/completer.py b/xonsh/completers/completer.py deleted file mode 100644 index 553c7d6..0000000 --- a/xonsh/completers/completer.py +++ /dev/null @@ -1,35 +0,0 @@ -import builtins - - -def complete_completer(prefix, line, start, end, ctx): - """ - Completion for "completer" - """ - args = line.split(" ") - if len(args) == 0 or args[0] != "completer": - return None - curix = args.index(prefix) - compnames = set(builtins.__xonsh__.completers.keys()) - if curix == 1: - possible = {"list", "help", "add", "remove"} - elif curix == 2: - if args[1] == "help": - possible = {"list", "add", "remove"} - elif args[1] == "remove": - possible = compnames - else: - raise StopIteration - else: - if args[1] != "add": - raise StopIteration - if curix == 3: - possible = {i for i, j in builtins.__xonsh__.ctx.items() if callable(j)} - elif curix == 4: - possible = ( - {"start", "end"} - | {">" + n for n in compnames} - | {"<" + n for n in compnames} - ) - else: - raise StopIteration - return {i for i in possible if i.startswith(prefix)} diff --git a/xonsh/completers/dirs.py b/xonsh/completers/dirs.py deleted file mode 100644 index 413e713..0000000 --- a/xonsh/completers/dirs.py +++ /dev/null @@ -1,26 +0,0 @@ -from xonsh.completers.man import complete_from_man -from xonsh.completers.path import complete_dir - - -def complete_cd(prefix, line, start, end, ctx): - """ - Completion for "cd", includes only valid directory names. - """ - if start != 0 and line.split(" ")[0] == "cd": - return complete_dir(prefix, line, start, end, ctx, True) - return set() - - -def complete_rmdir(prefix, line, start, end, ctx): - """ - Completion for "rmdir", includes only valid directory names. 
- """ - if start != 0 and line.split(" ")[0] == "rmdir": - opts = { - i - for i in complete_from_man("-", "rmdir -", 6, 7, ctx) - if i.startswith(prefix) - } - comps, lp = complete_dir(prefix, line, start, end, ctx, True) - return comps | opts, lp - return set() diff --git a/xonsh/completers/init.py b/xonsh/completers/init.py deleted file mode 100644 index dc7ee80..0000000 --- a/xonsh/completers/init.py +++ /dev/null @@ -1,39 +0,0 @@ -"""Constructor for xonsh completer objects.""" -import collections - -from xonsh.completers.pip import complete_pip -from xonsh.completers.man import complete_from_man -from xonsh.completers.bash import complete_from_bash -from xonsh.completers.base import complete_base -from xonsh.completers.path import complete_path -from xonsh.completers.dirs import complete_cd, complete_rmdir -from xonsh.completers.python import ( - complete_python, - complete_import, - complete_python_mode, -) -from xonsh.completers.commands import complete_skipper -from xonsh.completers.completer import complete_completer -from xonsh.completers.xompletions import complete_xonfig, complete_xontrib - - -def default_completers(): - """Creates a copy of the default completers.""" - return collections.OrderedDict( - [ - ("python_mode", complete_python_mode), - ("base", complete_base), - ("completer", complete_completer), - ("skip", complete_skipper), - ("pip", complete_pip), - ("cd", complete_cd), - ("rmdir", complete_rmdir), - ("xonfig", complete_xonfig), - ("xontrib", complete_xontrib), - ("bash", complete_from_bash), - ("man", complete_from_man), - ("import", complete_import), - ("python", complete_python), - ("path", complete_path), - ] - ) diff --git a/xonsh/completers/man.py b/xonsh/completers/man.py deleted file mode 100644 index 14af740..0000000 --- a/xonsh/completers/man.py +++ /dev/null @@ -1,57 +0,0 @@ -import os -import re -import pickle -import builtins -import subprocess - -import xonsh.lazyasd as xl - -from xonsh.completers.tools import get_filter_function - -OPTIONS = None -OPTIONS_PATH = None - - -@xl.lazyobject -def SCRAPE_RE(): - return re.compile(r"^(?:\s*(?:-\w|--[a-z0-9-]+)[\s,])+", re.M) - - -@xl.lazyobject -def INNER_OPTIONS_RE(): - return re.compile(r"-\w|--[a-z0-9-]+") - - -def complete_from_man(prefix, line, start, end, ctx): - """ - Completes an option name, based on the contents of the associated man - page. 
- """ - global OPTIONS, OPTIONS_PATH - if OPTIONS is None: - datadir = builtins.__xonsh__.env["XONSH_DATA_DIR"] - OPTIONS_PATH = os.path.join(datadir, "man_completions_cache") - try: - with open(OPTIONS_PATH, "rb") as f: - OPTIONS = pickle.load(f) - except Exception: - OPTIONS = {} - if not prefix.startswith("-"): - return set() - cmd = line.split()[0] - if cmd not in OPTIONS: - try: - manpage = subprocess.Popen( - ["man", cmd], stdout=subprocess.PIPE, stderr=subprocess.DEVNULL - ) - # This is a trick to get rid of reverse line feeds - text = subprocess.check_output(["col", "-b"], stdin=manpage.stdout) - text = text.decode("utf-8") - scraped_text = " ".join(SCRAPE_RE.findall(text)) - matches = INNER_OPTIONS_RE.findall(scraped_text) - OPTIONS[cmd] = matches - with open(OPTIONS_PATH, "wb") as f: - pickle.dump(OPTIONS, f) - except Exception: - return set() - return {s for s in OPTIONS[cmd] if get_filter_function()(s, prefix)} diff --git a/xonsh/completers/path.py b/xonsh/completers/path.py deleted file mode 100644 index 43e736d..0000000 --- a/xonsh/completers/path.py +++ /dev/null @@ -1,326 +0,0 @@ -import os -import re -import ast -import glob -import builtins - -import xonsh.tools as xt -import xonsh.platform as xp -import xonsh.lazyasd as xl - -from xonsh.completers.tools import get_filter_function - - -@xl.lazyobject -def PATTERN_NEED_QUOTES(): - pattern = r'\s`\$\{\}\,\*\(\)"\'\?&#' - if xp.ON_WINDOWS: - pattern += "%" - pattern = "[" + pattern + "]" + r"|\band\b|\bor\b" - return re.compile(pattern) - - -def cd_in_command(line): - """Returns True if "cd" is a token in the line, False otherwise.""" - lexer = builtins.__xonsh__.execer.parser.lexer - lexer.reset() - lexer.input(line) - have_cd = False - for tok in lexer: - if tok.type == "NAME" and tok.value == "cd": - have_cd = True - break - return have_cd - - -def _path_from_partial_string(inp, pos=None): - if pos is None: - pos = len(inp) - partial = inp[:pos] - startix, endix, quote = xt.check_for_partial_string(partial) - _post = "" - if startix is None: - return None - elif endix is None: - string = partial[startix:] - else: - if endix != pos: - _test = partial[endix:pos] - if not any(i == " " for i in _test): - _post = _test - else: - return None - string = partial[startix:endix] - end = xt.RE_STRING_START.sub("", quote) - _string = string - if not _string.endswith(end): - _string = _string + end - try: - val = ast.literal_eval(_string) - except (SyntaxError, ValueError): - return None - if isinstance(val, bytes): - env = builtins.__xonsh__.env - val = val.decode( - encoding=env.get("XONSH_ENCODING"), errors=env.get("XONSH_ENCODING_ERRORS") - ) - return string + _post, val + _post, quote, end - - -def _normpath(p): - """ - Wraps os.normpath() to avoid removing './' at the beginning - and '/' at the end. 
On windows it does the same with backslashes - """ - initial_dotslash = p.startswith(os.curdir + os.sep) - initial_dotslash |= xp.ON_WINDOWS and p.startswith(os.curdir + os.altsep) - p = p.rstrip() - trailing_slash = p.endswith(os.sep) - trailing_slash |= xp.ON_WINDOWS and p.endswith(os.altsep) - p = os.path.normpath(p) - if initial_dotslash and p != ".": - p = os.path.join(os.curdir, p) - if trailing_slash: - p = os.path.join(p, "") - if xp.ON_WINDOWS and builtins.__xonsh__.env.get("FORCE_POSIX_PATHS"): - p = p.replace(os.sep, os.altsep) - return p - - -def _startswithlow(x, start, startlow=None): - if startlow is None: - startlow = start.lower() - return x.startswith(start) or x.lower().startswith(startlow) - - -def _startswithnorm(x, start, startlow=None): - return x.startswith(start) - - -def _env(prefix): - if prefix.startswith("$"): - key = prefix[1:] - return { - "$" + k for k in builtins.__xonsh__.env if get_filter_function()(k, key) - } - return () - - -def _dots(prefix): - slash = xt.get_sep() - if slash == "\\": - slash = "" - if prefix in {"", "."}: - return ("." + slash, ".." + slash) - elif prefix == "..": - return (".." + slash,) - else: - return () - - -def _add_cdpaths(paths, prefix): - """Completes current prefix using CDPATH""" - env = builtins.__xonsh__.env - csc = env.get("CASE_SENSITIVE_COMPLETIONS") - glob_sorted = env.get("GLOB_SORTED") - for cdp in env.get("CDPATH"): - test_glob = os.path.join(cdp, prefix) + "*" - for s in xt.iglobpath( - test_glob, ignore_case=(not csc), sort_result=glob_sorted - ): - if os.path.isdir(s): - paths.add(os.path.basename(s)) - - -def _quote_to_use(x): - single = "'" - double = '"' - if single in x and double not in x: - return double - else: - return single - - -def _quote_paths(paths, start, end, append_end=True): - expand_path = builtins.__xonsh__.expand_path - out = set() - space = " " - backslash = "\\" - double_backslash = "\\\\" - slash = xt.get_sep() - orig_start = start - orig_end = end - # quote on all or none, to make readline completes to max prefix - need_quotes = any( - re.search(PATTERN_NEED_QUOTES, x) or (backslash in x and slash != backslash) - for x in paths - ) - - for s in paths: - start = orig_start - end = orig_end - if start == "" and need_quotes: - start = end = _quote_to_use(s) - if os.path.isdir(expand_path(s)): - _tail = slash - elif end == "": - _tail = space - else: - _tail = "" - if start != "" and "r" not in start and backslash in s: - start = "r%s" % start - s = s + _tail - if end != "": - if "r" not in start.lower(): - s = s.replace(backslash, double_backslash) - if s.endswith(backslash) and not s.endswith(double_backslash): - s += backslash - if end in s: - s = s.replace(end, "".join("\\%s" % i for i in end)) - s = start + s + end if append_end else start + s - out.add(s) - return out, need_quotes - - -def _joinpath(path): - # convert our tuple representation back into a string representing a path - if path is None: - return "" - elif len(path) == 0: - return "" - elif path == ("",): - return xt.get_sep() - elif path[0] == "": - return xt.get_sep() + _normpath(os.path.join(*path)) - else: - return _normpath(os.path.join(*path)) - - -def _splitpath(path): - # convert a path into an intermediate tuple representation - # if this tuple starts with '', it means that the path was an absolute path - path = _normpath(path) - if path.startswith(xt.get_sep()): - pre = ("",) - else: - pre = () - return pre + _splitpath_helper(path, ()) - - -def _splitpath_helper(path, sofar=()): - folder, path = 
os.path.split(path) - if path: - sofar = sofar + (path,) - if not folder or folder == xt.get_sep(): - return sofar[::-1] - elif xp.ON_WINDOWS and not path: - return os.path.splitdrive(folder)[:1] + sofar[::-1] - elif xp.ON_WINDOWS and os.path.splitdrive(path)[0]: - return sofar[::-1] - return _splitpath_helper(folder, sofar) - - -def subsequence_match(ref, typed, csc): - """ - Detects whether typed is a subsequence of ref. - - Returns ``True`` if the characters in ``typed`` appear (in order) in - ``ref``, regardless of exactly where in ``ref`` they occur. If ``csc`` is - ``False``, ignore the case of ``ref`` and ``typed``. - - Used in "subsequence" path completion (e.g., ``~/u/ro`` expands to - ``~/lou/carcohl``) - """ - if csc: - return _subsequence_match_iter(ref, typed) - else: - return _subsequence_match_iter(ref.lower(), typed.lower()) - - -def _subsequence_match_iter(ref, typed): - if len(typed) == 0: - return True - elif len(ref) == 0: - return False - elif ref[0] == typed[0]: - return _subsequence_match_iter(ref[1:], typed[1:]) - else: - return _subsequence_match_iter(ref[1:], typed) - - -def _expand_one(sofar, nextone, csc): - out = set() - glob_sorted = builtins.__xonsh__.env.get("GLOB_SORTED") - for i in sofar: - _glob = os.path.join(_joinpath(i), "*") if i is not None else "*" - for j in xt.iglobpath(_glob, sort_result=glob_sorted): - j = os.path.basename(j) - if subsequence_match(j, nextone, csc): - out.add((i or ()) + (j,)) - return out - - -def complete_path(prefix, line, start, end, ctx, cdpath=True, filtfunc=None): - """Completes based on a path name.""" - # string stuff for automatic quoting - path_str_start = "" - path_str_end = "" - append_end = True - p = _path_from_partial_string(line, end) - lprefix = len(prefix) - if p is not None: - lprefix = len(p[0]) - prefix = p[1] - path_str_start = p[2] - path_str_end = p[3] - if len(line) >= end + 1 and line[end] == path_str_end: - append_end = False - tilde = "~" - paths = set() - env = builtins.__xonsh__.env - csc = env.get("CASE_SENSITIVE_COMPLETIONS") - glob_sorted = env.get("GLOB_SORTED") - prefix = glob.escape(prefix) - for s in xt.iglobpath(prefix + "*", ignore_case=(not csc), sort_result=glob_sorted): - paths.add(s) - if len(paths) == 0 and env.get("SUBSEQUENCE_PATH_COMPLETION"): - # this block implements 'subsequence' matching, similar to fish and zsh. - # matches are based on subsequences, not substrings. - # e.g., ~/u/ro completes to ~/lou/carcolh - # see above functions for details. 
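# A tiny check of the subsequence matcher defined above (values illustrative):
assert _subsequence_match_iter("carcolh", "co")      # 'c' then 'o', in order
assert not _subsequence_match_iter("carcolh", "cx")  # no 'x' after the 'c'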
- p = _splitpath(os.path.expanduser(prefix)) - if len(p) != 0: - if p[0] == "": - basedir = ("",) - p = p[1:] - else: - basedir = None - matches_so_far = {basedir} - for i in p: - matches_so_far = _expand_one(matches_so_far, i, csc) - paths |= {_joinpath(i) for i in matches_so_far} - if len(paths) == 0 and env.get("FUZZY_PATH_COMPLETION"): - threshold = env.get("SUGGEST_THRESHOLD") - for s in xt.iglobpath( - os.path.dirname(prefix) + "*", - ignore_case=(not csc), - sort_result=glob_sorted, - ): - if xt.levenshtein(prefix, s, threshold) < threshold: - paths.add(s) - if tilde in prefix: - home = os.path.expanduser(tilde) - paths = {s.replace(home, tilde) for s in paths} - if cdpath and cd_in_command(line): - _add_cdpaths(paths, prefix) - paths = set(filter(filtfunc, paths)) - paths, _ = _quote_paths( - {_normpath(s) for s in paths}, path_str_start, path_str_end, append_end - ) - paths.update(filter(filtfunc, _dots(prefix))) - paths.update(filter(filtfunc, _env(prefix))) - return paths, lprefix - - -def complete_dir(prefix, line, start, end, ctx, cdpath=False): - return complete_path(prefix, line, start, end, ctx, cdpath, filtfunc=os.path.isdir) diff --git a/xonsh/completers/pip.py b/xonsh/completers/pip.py deleted file mode 100644 index b80acb9..0000000 --- a/xonsh/completers/pip.py +++ /dev/null @@ -1,56 +0,0 @@ -"""Completers for pip.""" -# pylint: disable=invalid-name, missing-docstring, unsupported-membership-test -# pylint: disable=unused-argument, not-an-iterable -import re -import subprocess - -import xonsh.lazyasd as xl - - -@xl.lazyobject -def PIP_RE(): - return re.compile(r"pip(?:\d|\.)*") - - -@xl.lazyobject -def PIP_LIST_RE(): - return re.compile(r"pip(?:\d|\.)* (?:uninstall|show)") - - -@xl.lazyobject -def ALL_COMMANDS(): - try: - help_text = str( - subprocess.check_output(["pip", "--help"], stderr=subprocess.DEVNULL) - ) - except FileNotFoundError: - return [] - commands = re.findall(r" (\w+) ", help_text) - return [c for c in commands if c not in ["completion", "help"]] - - -def complete_pip(prefix, line, begidx, endidx, ctx): - """Completes python's package manager pip""" - line_len = len(line.split()) - if ( - (line_len > 3) - or (line_len > 2 and line.endswith(" ")) - or (not PIP_RE.search(line)) - ): - return - if PIP_LIST_RE.search(line): - try: - items = subprocess.check_output(["pip", "list"], stderr=subprocess.DEVNULL) - except FileNotFoundError: - return set() - items = items.decode("utf-8").splitlines() - return set(i.split()[0] for i in items if i.split()[0].startswith(prefix)) - - if (line_len > 1 and line.endswith(" ")) or line_len > 2: - # "pip show " -> no complete (note space) - return - if prefix not in ALL_COMMANDS: - suggestions = [c for c in ALL_COMMANDS if c.startswith(prefix)] - if suggestions: - return suggestions, len(prefix) - return ALL_COMMANDS, len(prefix) diff --git a/xonsh/completers/python.py b/xonsh/completers/python.py deleted file mode 100644 index b9c6689..0000000 --- a/xonsh/completers/python.py +++ /dev/null @@ -1,290 +0,0 @@ -"""Completers for Python code""" -import re -import sys -import inspect -import builtins -import importlib -import collections.abc as cabc - -import xonsh.tools as xt -import xonsh.lazyasd as xl - -from xonsh.completers.tools import get_filter_function - - -@xl.lazyobject -def RE_ATTR(): - return re.compile(r"([^\s\(\)]+(\.[^\s\(\)]+)*)\.(\w*)$") - - -@xl.lazyobject -def XONSH_EXPR_TOKENS(): - return { - "and ", - "else", - "for ", - "if ", - "in ", - "is ", - "lambda ", - "not ", - "or ", - "+", - "-", - "/", - "//", -
"%", - "**", - "|", - "&", - "~", - "^", - ">>", - "<<", - "<", - "<=", - ">", - ">=", - "==", - "!=", - ",", - "?", - "??", - "$(", - "${", - "$[", - "...", - "![", - "!(", - "@(", - "@$(", - "@", - } - - -@xl.lazyobject -def XONSH_STMT_TOKENS(): - return { - "as ", - "assert ", - "break", - "class ", - "continue", - "def ", - "del ", - "elif ", - "except ", - "finally:", - "from ", - "global ", - "import ", - "nonlocal ", - "pass", - "raise ", - "return ", - "try:", - "while ", - "with ", - "yield ", - "-", - "/", - "//", - "%", - "**", - "|", - "&", - "~", - "^", - ">>", - "<<", - "<", - "<=", - "->", - "=", - "+=", - "-=", - "*=", - "/=", - "%=", - "**=", - ">>=", - "<<=", - "&=", - "^=", - "|=", - "//=", - ";", - ":", - "..", - } - - -@xl.lazyobject -def XONSH_TOKENS(): - return set(XONSH_EXPR_TOKENS) | set(XONSH_STMT_TOKENS) - - -def complete_python(prefix, line, start, end, ctx): - """ - Completes based on the contents of the current Python environment, - the Python built-ins, and xonsh operators. - If there are no matches, split on common delimiters and try again. - """ - rtn = _complete_python(prefix, line, start, end, ctx) - if not rtn: - prefix = ( - re.split(r"\(|=|{|\[|,", prefix)[-1] - if not prefix.startswith(",") - else prefix - ) - start = line.find(prefix) - rtn = _complete_python(prefix, line, start, end, ctx) - return rtn, len(prefix) - return rtn - - -def _complete_python(prefix, line, start, end, ctx): - """ - Completes based on the contents of the current Python environment, - the Python built-ins, and xonsh operators. - """ - if line != "": - first = line.split()[0] - if first in builtins.__xonsh__.commands_cache and first not in ctx: - return set() - filt = get_filter_function() - rtn = set() - if ctx is not None: - if "." in prefix: - rtn |= attr_complete(prefix, ctx, filt) - args = python_signature_complete(prefix, line, end, ctx, filt) - rtn |= args - rtn |= {s for s in ctx if filt(s, prefix)} - else: - args = () - if len(args) == 0: - # not in a function call, so we can add non-expression tokens - rtn |= {s for s in XONSH_TOKENS if filt(s, prefix)} - else: - rtn |= {s for s in XONSH_EXPR_TOKENS if filt(s, prefix)} - rtn |= {s for s in dir(builtins) if filt(s, prefix)} - return rtn - - -def complete_python_mode(prefix, line, start, end, ctx): - """ - Python-mode completions for @( and ${ - """ - if not (prefix.startswith("@(") or prefix.startswith("${")): - return set() - prefix_start = prefix[:2] - python_matches = complete_python(prefix[2:], line, start - 2, end - 2, ctx) - if isinstance(python_matches, cabc.Sequence): - python_matches = python_matches[0] - return set(prefix_start + i for i in python_matches) - - -def _safe_eval(expr, ctx): - """Safely tries to evaluate an expression. If this fails, it will return - a (None, None) tuple. 
- """ - _ctx = None - xonsh_safe_eval = builtins.__xonsh__.execer.eval - try: - val = xonsh_safe_eval(expr, ctx, ctx, transform=False) - _ctx = ctx - except: # pylint:disable=bare-except - try: - val = xonsh_safe_eval(expr, builtins.__dict__, transform=False) - _ctx = builtins.__dict__ - except: # pylint:disable=bare-except - val = _ctx = None - return val, _ctx - - -def attr_complete(prefix, ctx, filter_func): - """Complete attributes of an object.""" - attrs = set() - m = RE_ATTR.match(prefix) - if m is None: - return attrs - expr, attr = m.group(1, 3) - expr = xt.subexpr_from_unbalanced(expr, "(", ")") - expr = xt.subexpr_from_unbalanced(expr, "[", "]") - expr = xt.subexpr_from_unbalanced(expr, "{", "}") - val, _ctx = _safe_eval(expr, ctx) - if val is None and _ctx is None: - return attrs - if len(attr) == 0: - opts = [o for o in dir(val) if not o.startswith("_")] - else: - opts = [o for o in dir(val) if filter_func(o, attr)] - prelen = len(prefix) - for opt in opts: - # check whether these options actually work (e.g., disallow 7.imag) - _expr = "{0}.{1}".format(expr, opt) - _val_, _ctx_ = _safe_eval(_expr, _ctx) - if _val_ is None and _ctx_ is None: - continue - a = getattr(val, opt) - if builtins.__xonsh__.env["COMPLETIONS_BRACKETS"]: - if callable(a): - rpl = opt + "(" - elif isinstance(a, (cabc.Sequence, cabc.Mapping)): - rpl = opt + "[" - else: - rpl = opt - else: - rpl = opt - # note that prefix[:prelen-len(attr)] != prefix[:-len(attr)] - # when len(attr) == 0. - comp = prefix[: prelen - len(attr)] + rpl - attrs.add(comp) - return attrs - - -def python_signature_complete(prefix, line, end, ctx, filter_func): - """Completes a python function (or other callable) call by completing - argument and keyword argument names. - """ - front = line[:end] - if xt.is_balanced(front, "(", ")"): - return set() - funcname = xt.subexpr_before_unbalanced(front, "(", ")") - val, _ctx = _safe_eval(funcname, ctx) - if val is None: - return set() - try: - sig = inspect.signature(val) - except ValueError: - return set() - args = {p + "=" for p in sig.parameters if filter_func(p, prefix)} - return args - - -def complete_import(prefix, line, start, end, ctx): - """ - Completes module names and contents for "import ..." and "from ... import - ..." 
ltoks = line.split() - ntoks = len(ltoks) - if ntoks == 2 and ltoks[0] == "from": - # completing module to import - return {"{} ".format(i) for i in complete_module(prefix)} - if ntoks > 1 and ltoks[0] == "import" and start == len("import "): - # completing module to import - return complete_module(prefix) - if ntoks > 2 and ltoks[0] == "from" and ltoks[2] == "import": - # complete thing inside a module - try: - mod = importlib.import_module(ltoks[1]) - except ImportError: - return set() - out = {i[0] for i in inspect.getmembers(mod) if i[0].startswith(prefix)} - return out - return set() - - -def complete_module(prefix): - return {s for s in sys.modules if get_filter_function()(s, prefix)} diff --git a/xonsh/completers/tools.py b/xonsh/completers/tools.py deleted file mode 100644 index 6b77b05..0000000 --- a/xonsh/completers/tools.py +++ /dev/null @@ -1,33 +0,0 @@ -"""Xonsh completer tools.""" -import builtins -import textwrap - - -def _filter_normal(s, x): - return s.startswith(x) - - -def _filter_ignorecase(s, x): - return s.lower().startswith(x.lower()) - - -def get_filter_function(): - """ - Return an appropriate filtering function for completions, given the value - of $CASE_SENSITIVE_COMPLETIONS - """ - csc = builtins.__xonsh__.env.get("CASE_SENSITIVE_COMPLETIONS") - if csc: - return _filter_normal - else: - return _filter_ignorecase - - -def justify(s, max_length, left_pad=0): - """ - Re-wrap the string s so that each line is no more than max_length - characters long, padding all lines but the first on the left with - left_pad spaces. - """ - txt = textwrap.wrap(s, width=max_length, subsequent_indent=" " * left_pad) - return "\n".join(txt) diff --git a/xonsh/completers/xompletions.py b/xonsh/completers/xompletions.py deleted file mode 100644 index c65dfa9..0000000 --- a/xonsh/completers/xompletions.py +++ /dev/null @@ -1,48 +0,0 @@ -"""Provides completions for xonsh internal utilities""" - -import xonsh.xontribs as xx -import xonsh.tools as xt - - -def complete_xonfig(prefix, line, start, end, ctx): - """Completion for ``xonfig``""" - args = line.split(" ") - if len(args) == 0 or args[0] != "xonfig": - return None - curix = args.index(prefix) - if curix == 1: - possible = {"info", "wizard", "styles", "colors", "-h"} - elif curix == 2 and args[1] == "colors": - possible = set(xt.color_style_names()) - else: - raise StopIteration - return {i for i in possible if i.startswith(prefix)} - - -def _list_installed_xontribs(): - meta = xx.xontrib_metadata() - installed = [] - for md in meta["xontribs"]: - name = md["name"] - spec = xx.find_xontrib(name) - if spec is not None: - installed.append(spec.name.rsplit(".")[-1]) - - return installed - - -def complete_xontrib(prefix, line, start, end, ctx): - """Completion for ``xontrib``""" - args = line.split(" ") - if len(args) == 0 or args[0] != "xontrib": - return None - curix = args.index(prefix) - if curix == 1: - possible = {"list", "load"} - elif curix == 2: - if args[1] == "load": - possible = _list_installed_xontribs() - else: - raise StopIteration - - return {i for i in possible if i.startswith(prefix)} diff --git a/xonsh/contexts.py b/xonsh/contexts.py deleted file mode 100644 index 217c7fc..0000000 --- a/xonsh/contexts.py +++ /dev/null @@ -1,117 +0,0 @@ -"""Context management tools for xonsh.""" -import sys -import textwrap -import builtins -from collections.abc import Mapping - -from xonsh.tools import XonshError - - -class Block(object): - """This is a context manager for obtaining a block of lines without actually - executing the block.
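# A hypothetical use of Block as a macro context manager (xonsh syntax; the
# block contents are illustrative):
#
#     with! Block() as b:
#         ls -l
#         echo done
#
# Nothing runs; afterwards b.lines holds the block's source lines while
# b.glbs and b.locs capture the calling context.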
The lines are accessible as the 'lines' attribute. - This must be used as a macro. - """ - - __xonsh_block__ = str - - def __init__(self): - """ - Attributes - ---------- - lines : list of str or None - Block lines as if split by str.splitlines(), if available. - glbs : Mapping or None - Global execution context, i.e., globals(). - locs : Mapping or None - Local execution context, i.e., locals(). - """ - self.lines = self.glbs = self.locs = None - - def __enter__(self): - if not hasattr(self, "macro_block"): - raise XonshError(self.__class__.__name__ + " must be entered as a macro!") - self.lines = self.macro_block.splitlines() - self.glbs = self.macro_globals - if self.macro_locals is not self.macro_globals: - # leave locals as None when it is the same as globals - self.locs = self.macro_locals - return self - - def __exit__(self, exc_type, exc_value, traceback): - pass - - -class Functor(Block): - """This is a context manager that turns the block into a callable - object, bound to the execution context it was created in. - """ - - def __init__(self, args=(), kwargs=None, rtn=""): - """ - Parameters - ---------- - args : Sequence of str, optional - A tuple of argument names for the functor. - kwargs : Mapping of str to values or list of item tuples, optional - Keyword argument names and values, if available. - rtn : str, optional - Name of object to return, if available. - - Attributes - ---------- - func : function - The underlying function object. This defaults to None and is set - after the block is exited. - """ - super().__init__() - self.func = None - self.args = args - if kwargs is None: - self.kwargs = [] - elif isinstance(kwargs, Mapping): - self.kwargs = sorted(kwargs.items()) - else: - self.kwargs = kwargs - self.rtn = rtn - - def __enter__(self): - super().__enter__() - body = textwrap.indent(self.macro_block, " ") - uid = hash(body) + sys.maxsize # should always be a positive int - name = "__xonsh_functor_{uid}__".format(uid=uid) - # construct signature string - sig = rtn = "" - sig = ", ".join(self.args) - kwstr = ", ".join([k + "=None" for k, _ in self.kwargs]) - if len(kwstr) > 0: - sig = kwstr if len(sig) == 0 else sig + ", " + kwstr - # construct return string - rtn = str(self.rtn) - if len(rtn) > 0: - rtn = " return " + rtn + "\n" - # construct function string - fstr = "def {name}({sig}):\n{body}\n{rtn}" - fstr = fstr.format(name=name, sig=sig, body=body, rtn=rtn) - glbs = self.glbs - locs = self.locs - execer = builtins.__xonsh__.execer - execer.exec(fstr, glbs=glbs, locs=locs) - if locs is not None and name in locs: - func = locs[name] - elif name in glbs: - func = glbs[name] - else: - raise ValueError("Functor block could not be found in context.") - if len(self.kwargs) > 0: - func.__defaults__ = tuple(v for _, v in self.kwargs) - self.func = func - return self - - def __exit__(self, exc_type, exc_value, traceback): - pass - - def __call__(self, *args, **kwargs): - """Dispatches to func.""" - if self.func is None: - msg = "{} block with 'None' func not callable" - raise AttributeError(msg.format(self.__class__.__name__)) - return self.func(*args, **kwargs) diff --git a/xonsh/data/data.txt b/xonsh/data/data.txt deleted file mode 100644 index 5734f98..0000000 --- a/xonsh/data/data.txt +++ /dev/null @@ -1,348 +0,0 @@ -(u'vnpy', 363) -(u'thoppe', 104) -(u'edc', 114) -(u'projectatomic', 110) -(u'jrosebr1', 289) -(u'compose', 305) -(u'4148', 163) -(u'Dobiasd', 179) -(u'blaze', 374) -(u'Bekt', 133) -(u'josauder', 251) -(u'nylas', 101) -(u'facebook', 2664) -(u'izacus', 139)
-(u'HelloZeroNet', 1117) -(u'michael-lazar', 527) -(u'simonschellaert', 132) -(u'black-perl', 272) -(u'andersbll', 351) -(u'wecite', 274) -(u'stasinopoulos', 463) -(u'vmware', 669) -(u'waditu', 607) -(u'Leviathan1995', 372) -(u'TrackMaven', 146) -(u'tokland', 114) -(u'dellis23', 144) -(u'amueller', 143) -(u'NVIDIA', 733) -(u'trevorstephens', 207) -(u'rantonels', 312) -(u'klen', 293) -(u'vsergeev', 125) -(u'madisonmay', 911) -(u'DanMcInerney', 456) -(u'realpython', 181) -(u'graphific', 1418) -(u'shoopio', 163) -(u'conorpp', 123) -(u'sdiehl', 139) -(u'ajinabraham', 338) -(u'prashanthellina', 170) -(u'laike9m', 110) -(u'spyder-ide', 570) -(u'GedRap', 156) -(u'diafygi', 161) -(u'maxpumperla', 105) -(u'ClusterHQ', 282) -(u'dhilipsiva', 110) -(u'entropy1337', 133) -(u'dbpedia', 225) -(u'kelvinxu', 127) -(u'jsvine', 172) -(u'python', 147) -(u'OffensivePython', 305) -(u'jkbrzt', 476) -(u'quodlibet', 116) -(u'ulope', 200) -(u'andrea-cuttone', 157) -(u'alehander42', 193) -(u'KirkHadley', 139) -(u'revsys', 112) -(u'aigamedev', 583) -(u'sphinx-doc', 389) -(u'rietveld-codereview', 123) -(u'quantopian', 319) -(u'Deimos', 285) -(u'dgilland', 107) -(u'nccgroup', 146) -(u'nickjj', 138) -(u'nbs-system', 107) -(u'elceef', 272) -(u'compjour', 630) -(u'jrnewell', 207) -(u'bndr', 510) -(u'EricssonResearch', 188) -(u'raelgc', 701) -(u'avinassh', 2659) -(u'vitruvianscience', 152) -(u'achiku', 811) -(u'eugene-eeo', 144) -(u'paul-nechifor', 465) -(u'antiboredom', 582) -(u'datalib', 301) -(u'brandon-rhodes', 110) -(u'philpep', 101) -(u'python-cn', 151) -(u'jakevdp', 275) -(u'CiscoCloud', 1635) -(u'yandex', 174) -(u'jbornschein', 201) -(u'j-bennet', 307) -(u'Skycrab', 124) -(u'spotify', 376) -(u'pyparallel', 345) -(u'JakeCooper', 130) -(u'jorgebastida', 201) -(u'rouseguy', 253) -(u'pfalcon', 108) -(u'geerlingguy', 155) -(u'aliyun', 113) -(u'dorneanu', 124) -(u'cloudera', 500) -(u'drzoidberg33', 127) -(u'brennerm', 1154) -(u'TomAnthony', 336) -(u'clarkduvall', 310) -(u'matthewearl', 322) -(u'guidepointsecurity', 167) -(u'samim23', 260) -(u'rhiever', 1848) -(u'warner', 345) -(u'gojhonny', 304) -(u'joshenders', 210) -(u'itdxer', 108) -(u'fraser-hemp', 1366) -(u'omriher', 269) -(u'motet', 265) -(u'waf-project', 158) -(u'AlexEne', 135) -(u'jmcarp', 159) -(u'scrapinghub', 141) -(u'puniaze', 114) -(u'orf', 207) -(u'rasguanabana', 892) -(u'yasoob', 899) -(u'biicode', 205) -(u'pybee', 237) -(u'apprenticeharper', 625) -(u'angr', 279) -(u'Hypsurus', 214) -(u'PaulSec', 228) -(u'xybu', 111) -(u'ryankiros', 343) -(u'wapiflapi', 171) -(u'trustedsec', 337) -(u'jfalken', 135) -(u'ncrocfer', 337) -(u'mila-udem', 398) -(u'mardix', 119) -(u'automl', 227) -(u'ylovern', 171) -(u'jacebrowning', 135) -(u'Eyepea', 328) -(u'csvoss', 468) -(u'dittos', 135) -(u'icgood', 292) -(u'dciabrin', 114) -(u'minimaxir', 8920) -(u'brandonshin', 341) -(u'jonathanslenders', 1531) -(u'donnemartin', 7955) -(u'KeyboardFire', 3324) -(u'offapi', 142) -(u'no13bus', 367) -(u'tatanus', 108) -(u'Vector35', 281) -(u'micheloosterhof', 189) -(u'gamechanger', 220) -(u'isislab', 233) -(u'joxeankoret', 263) -(u'certsocietegenerale', 189) -(u'inconvergent', 283) -(u'rpp0', 235) -(u'yukuku', 225) -(u'lijiejie', 174) -(u'stitchfix', 928) -(u'LionSec', 108) -(u'n0tr00t', 108) -(u'ssteuteville', 150) -(u'getsentry', 661) -(u'agermanidis', 731) -(u'78', 354) -(u'piskvorky', 308) -(u'sogisha', 172) -(u'mre', 113) -(u'rbgirshick', 435) -(u'rtluckie', 112) -(u'dbcli', 2072) -(u'eternnoir', 170) -(u'recipy', 164) -(u'rasbt', 350) -(u'ant4g0nist', 125) -(u'm57', 598) -(u'TailorDev', 
119) -(u'cosven', 169) -(u'zachriggle', 130) -(u'knownsec', 151) -(u'awslabs', 200) -(u'n1nj4sec', 827) -(u'armadaplatform', 158) -(u'DoctorTeeth', 129) -(u'atmb4u', 155) -(u'mmin18', 1063) -(u'divmain', 1206) -(u'fuzzing', 101) -(u'urbenlegend', 181) -(u'regebro', 245) -(u'QuantumFractal', 222) -(u'kootenpv', 243) -(u'Alexis-benoist', 120) -(u'sontek', 108) -(u'harshasrinivas', 147) -(u'rabidgremlin', 187) -(u'Slava', 171) -(u'10se1ucgo', 1884) -(u'fugue', 266) -(u'AlessandroZ', 1423) -(u'unixy', 107) -(u'pindexis', 582) -(u'Yinzo', 115) -(u'joelpx', 351) -(u'ryanss', 864) -(u'Dynetics', 110) -(u'ExPHAT', 385) -(u'hephaest0s', 1246) -(u'armbues', 112) -(u'pytest-dev', 360) -(u'moha99sa', 105) -(u'nvbn', 14856) -(u'scopatz', 511) -(u'andrewgodwin', 205) -(u'architv', 422) -(u'owais', 122) -(u'lorin', 107) -(u'takluyver', 170) -(u'garabik', 104) -(u'kevinlawler', 183) -(u'krockode', 455) -(u'IndicoDataSolutions', 384) -(u'x43x61x69', 107) -(u'semirook', 103) -(u'DIYgod', 195) -(u'fffonion', 101) -(u'snoack', 429) -(u'rodricios', 197) -(u'RealHacker', 132) -(u'krisfields', 246) -(u'nils-werner', 202) -(u'mementum', 124) -(u'uaca', 220) -(u'tburmeister', 226) -(u'CoreSecurity', 294) -(u'lukasschwab', 298) -(u'jcjohnson', 275) -(u'timothycrosley', 903) -(u'linkedin', 142) -(u'rushter', 2155) -(u'WatchPeopleCode', 209) -(u'hellerve', 426) -(u'JacobPlaster', 198) -(u'thampiman', 1101) -(u'timlib', 116) -(u'pfnet', 832) -(u'mitsuhiko', 231) -(u'ziggear', 153) -(u'datawire', 152) -(u'jrfonseca', 199) -(u'Lasagne', 151) -(u'ssut', 130) -(u'ChrisTruncer', 199) -(u'projectcalico', 167) -(u'lehui99', 219) -(u'ring04h', 390) -(u'amoffat', 1225) -(u'317070', 134) -(u'p-e-w', 734) -(u'srsudar', 735) -(u'aromanovich', 102) -(u'twoscoops', 244) -(u'fchollet', 2958) -(u'minrk', 113) -(u'chrissimpkins', 2211) -(u'qiwsir', 673) -(u'smarr', 612) -(u'anishathalye', 796) -(u'onyxfish', 120) -(u'hexahedria', 602) -(u'deepmind', 344) -(u'wooey', 484) -(u'manugarri', 129) -(u'joschu', 387) -(u'parkouss', 134) -(u'ShawnDEvans', 165) -(u'Max00355', 507) -(u'uptimejp', 156) -(u'google', 5720) -(u'yosinski', 352) -(u'buckyroberts', 211) -(u'borgbackup', 142) -(u'chriscannon', 233) -(u'benanne', 175) -(u'jandre', 257) -(u'zeruniverse', 232) -(u'asciimoo', 167) -(u'fengsp', 208) -(u'Koed00', 201) -(u'duerrp', 193) -(u'byt3bl33d3r', 464) -(u'amjith', 154) -(u'DrkSephy', 404) -(u'andelf', 832) -(u'vertical-knowledge', 109) -(u'Ivaylo-Popov', 248) -(u'lhartikk', 422) -(u'lmco', 313) -(u'paylogic', 161) -(u'karan', 2672) -(u'zulip', 3108) -(u'billpmurphy', 375) -(u'saelo', 163) -(u'rdnetto', 173) -(u'unlimitedlabs', 191) -(u'lonetwin', 129) -(u'closeio', 304) -(u'billryan', 178) -(u'larsenwork', 2720) -(u'Dionach', 150) -(u'ottogroup', 308) -(u'paypal', 221) -(u'install-logos', 116) -(u'Maratyszcza', 610) -(u'avehtari', 292) -(u'AtnNn', 125) -(u'Neo23x0', 208) -(u'justmarkham', 135) -(u'vladimarius', 168) -(u'graphql-python', 128) -(u'mattya', 115) -(u'coodict', 537) -(u'sourcelair', 268) -(u'walkr', 222) -(u'bittorrent', 363) -(u'hugsy', 201) -(u'ayoungprogrammer', 292) -(u'MorganZhang100', 249) -(u'ChrisBeaumont', 245) -(u'Netflix', 346) -(u'yahoo', 302) -(u'mhallsmoore', 136) -(u'XX-net', 2731) -(u'leandrotoledo', 175) -(u'Brobin', 154) -(u'pyskell', 732) -(u'NathanEpstein', 119) -(u'yadayada', 245) -(u'dxa4481', 216) -(u'lyst', 408) \ No newline at end of file diff --git a/xonsh/diff_history.py b/xonsh/diff_history.py index 98ff4ab..218b319 100644 --- a/xonsh/diff_history.py +++ b/xonsh/diff_history.py @@ -1,84 +1,80 @@ 
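# Both versions of diff_history.py below drive their rendering from
# difflib.SequenceMatcher opcodes; a quick sketch (inputs illustrative):
from difflib import SequenceMatcher

sm = SequenceMatcher()
sm.set_seqs("ls -l", "ls -la")
print(sm.get_opcodes())  # [('equal', 0, 5, 0, 5), ('insert', 5, 5, 5, 6)]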
-# -*- coding: utf-8 -*- """Tools for diff'ing two xonsh history files in a meaningful fashion.""" -import difflib -import datetime -import itertools -import argparse +from datetime import datetime +from itertools import zip_longest +from difflib import SequenceMatcher -from xonsh.lazyjson import LazyJSON -from xonsh.tools import print_color +from xonsh import lazyjson +from xonsh.tools import TERM_COLORS -NO_COLOR_S = "{NO_COLOR}" -RED_S = "{RED}" -GREEN_S = "{GREEN}" -BOLD_RED_S = "{BOLD_RED}" -BOLD_GREEN_S = "{BOLD_GREEN}" +NO_COLOR = TERM_COLORS['NO_COLOR'].replace('\001', '').replace('\002', '') +RED = TERM_COLORS['RED'].replace('\001', '').replace('\002', '') +GREEN = TERM_COLORS['GREEN'].replace('\001', '').replace('\002', '') +BOLD_RED = TERM_COLORS['BOLD_RED'].replace('\001', '').replace('\002', '') +BOLD_GREEN = TERM_COLORS['BOLD_GREEN'].replace('\001', '').replace('\002', '') # intern some strings -REPLACE_S = "replace" -DELETE_S = "delete" -INSERT_S = "insert" -EQUAL_S = "equal" +REPLACE = 'replace' +DELETE = 'delete' +INSERT = 'insert' +EQUAL = 'equal' def bold_str_diff(a, b, sm=None): if sm is None: - sm = difflib.SequenceMatcher() - aline = RED_S + "- " - bline = GREEN_S + "+ " + sm = SequenceMatcher() + aline = RED + '- ' + bline = GREEN + '+ ' sm.set_seqs(a, b) for tag, i1, i2, j1, j2 in sm.get_opcodes(): - if tag == REPLACE_S: - aline += BOLD_RED_S + a[i1:i2] + RED_S - bline += BOLD_GREEN_S + b[j1:j2] + GREEN_S - elif tag == DELETE_S: - aline += BOLD_RED_S + a[i1:i2] + RED_S - elif tag == INSERT_S: - bline += BOLD_GREEN_S + b[j1:j2] + GREEN_S - elif tag == EQUAL_S: + if tag == REPLACE: + aline += BOLD_RED + a[i1:i2] + RED + bline += BOLD_GREEN + b[j1:j2] + GREEN + elif tag == DELETE: + aline += BOLD_RED + a[i1:i2] + RED + elif tag == INSERT: + bline += BOLD_GREEN + b[j1:j2] + GREEN + elif tag == EQUAL: aline += a[i1:i2] bline += b[j1:j2] else: - raise RuntimeError("tag not understood") - return aline + NO_COLOR_S + "\n" + bline + NO_COLOR_S + "\n" + raise RuntimeError('tag not understood') + return aline + NO_COLOR + '\n' + bline + NO_COLOR +'\n' def redline(line): - return "{red}- {line}{no_color}\n".format(red=RED_S, line=line, no_color=NO_COLOR_S) + return '{red}- {line}{no_color}\n'.format(red=RED, line=line, no_color=NO_COLOR) def greenline(line): - return "{green}+ {line}{no_color}\n".format( - green=GREEN_S, line=line, no_color=NO_COLOR_S - ) + return '{green}+ {line}{no_color}\n'.format(green=GREEN, line=line, no_color=NO_COLOR) def highlighted_ndiff(a, b): - """Returns a highlighted string, with bold characters where different.""" - s = "" - sm = difflib.SequenceMatcher() + """Returns a highlited string, with bold charaters where different.""" + s = '' + sm = SequenceMatcher() sm.set_seqs(a, b) - linesm = difflib.SequenceMatcher() + linesm = SequenceMatcher() for tag, i1, i2, j1, j2 in sm.get_opcodes(): - if tag == REPLACE_S: - for aline, bline in itertools.zip_longest(a[i1:i2], b[j1:j2]): + if tag == REPLACE: + for aline, bline in zip_longest(a[i1:i2], b[j1:j2]): if bline is None: s += redline(aline) elif aline is None: s += greenline(bline) else: s += bold_str_diff(aline, bline, sm=linesm) - elif tag == DELETE_S: + elif tag == DELETE: for aline in a[i1:i2]: s += redline(aline) - elif tag == INSERT_S: + elif tag == INSERT: for bline in b[j1:j2]: s += greenline(bline) - elif tag == EQUAL_S: + elif tag == EQUAL: for aline in a[i1:i2]: - s += " " + aline + "\n" + s += ' ' + aline + '\n' else: - raise RuntimeError("tag not understood") + raise RuntimeError('tag not 
understood') return s @@ -100,10 +96,10 @@ def __init__(self, afile, bfile, reopen=False, verbose=False): verbose : bool, optional Whether to print a verbose amount of information. """ - self.a = LazyJSON(afile, reopen=reopen) - self.b = LazyJSON(bfile, reopen=reopen) + self.a = lazyjson.LazyJSON(afile, reopen=reopen) + self.b = lazyjson.LazyJSON(bfile, reopen=reopen) self.verbose = verbose - self.sm = difflib.SequenceMatcher(autojunk=False) + self.sm = SequenceMatcher(autojunk=False) def __del__(self): self.a.close() @@ -113,233 +109,204 @@ def __str__(self): return self.format() def _header_line(self, lj): - s = lj._f.name if hasattr(lj._f, "name") else "" - s += " (" + lj["sessionid"] + ")" - s += " [locked]" if lj["locked"] else " [unlocked]" - ts = lj["ts"].load() - ts0 = datetime.datetime.fromtimestamp(ts[0]) - s += " started: " + ts0.isoformat(" ") + s = lj._f.name if hasattr(lj._f, 'name') else '' + s += ' (' + lj['sessionid'] + ')' + s += ' [locked]' if lj['locked'] else ' [unlocked]' + ts = lj['ts'].load() + ts0 = datetime.fromtimestamp(ts[0]) + s += ' started: ' + ts0.isoformat(' ') if ts[1] is not None: - ts1 = datetime.datetime.fromtimestamp(ts[1]) - s += " stopped: " + ts1.isoformat(" ") + " runtime: " + str(ts1 - ts0) + ts1 = datetime.fromtimestamp(ts[1]) + s += ' stopped: ' + ts1.isoformat(' ') + ' runtime: ' + str(ts1 - ts0) return s def header(self): """Computes a header string difference.""" - s = "{red}--- {aline}{no_color}\n" "{green}+++ {bline}{no_color}" - s = s.format( - aline=self._header_line(self.a), - bline=self._header_line(self.b), - red=RED_S, - green=GREEN_S, - no_color=NO_COLOR_S, - ) + s = ('{red}--- {aline}{no_color}\n' + '{green}+++ {bline}{no_color}') + s = s.format(aline=self._header_line(self.a), bline=self._header_line(self.b), + red=RED, green=GREEN, no_color=NO_COLOR) return s def _env_both_diff(self, in_both, aenv, benv): sm = self.sm - s = "" + s = '' for key in sorted(in_both): aval = aenv[key] bval = benv[key] if aval == bval: continue - s += "{0!r} is in both, but differs\n".format(key) - s += bold_str_diff(aval, bval, sm=sm) + "\n" + s += '{0!r} is in both, but differs\n'.format(key) + s += bold_str_diff(aval, bval, sm=sm) + '\n' return s def _env_in_one_diff(self, x, y, color, xid, xenv): only_x = sorted(x - y) if len(only_x) == 0: - return "" + return '' if self.verbose: - xstr = ",\n".join( - [" {0!r}: {1!r}".format(key, xenv[key]) for key in only_x] - ) - xstr = "\n" + xstr + xstr = ',\n'.join([' {0!r}: {1!r}'.format(key, xenv[key]) \ + for key in only_x]) + xstr = '\n' + xstr else: - xstr = ", ".join(["{0!r}".format(key) for key in only_x]) - in_x = "These vars are only in {color}{xid}{no_color}: {{{xstr}}}\n\n" - return in_x.format(xid=xid, color=color, no_color=NO_COLOR_S, xstr=xstr) + xstr = ', '.join(['{0!r}'.format(key) for key in only_x]) + in_x = 'These vars are only in {color}{xid}{no_color}: {{{xstr}}}\n\n' + return in_x.format(xid=xid, color=color, no_color=NO_COLOR, xstr=xstr) def envdiff(self): """Computes the difference between the environments.""" - aenv = self.a["env"].load() - benv = self.b["env"].load() + aenv = self.a['env'].load() + benv = self.b['env'].load() akeys = frozenset(aenv) bkeys = frozenset(benv) in_both = akeys & bkeys if len(in_both) == len(akeys) == len(bkeys): keydiff = self._env_both_diff(in_both, aenv, benv) if len(keydiff) == 0: - return "" - in_a = in_b = "" + return '' + in_a = in_b = '' else: keydiff = self._env_both_diff(in_both, aenv, benv) - in_a = self._env_in_one_diff(akeys, bkeys, RED_S, 
self.a["sessionid"], aenv) - in_b = self._env_in_one_diff( - bkeys, akeys, GREEN_S, self.b["sessionid"], benv - ) - s = "Environment\n-----------\n" + in_a + keydiff + in_b + in_a = self._env_in_one_diff(akeys, bkeys, RED, self.a['sessionid'], aenv) + in_b = self._env_in_one_diff(bkeys, akeys, GREEN, self.b['sessionid'], benv) + s = 'Environment\n-----------\n' + in_a + keydiff + in_b return s def _cmd_in_one_diff(self, inp, i, xlj, xid, color): - s = "cmd #{i} only in {color}{xid}{no_color}:\n" - s = s.format(i=i, color=color, xid=xid, no_color=NO_COLOR_S) + s = 'cmd #{i} only in {color}{xid}{no_color}:\n' + s = s.format(i=i, color=color, xid=xid, no_color=NO_COLOR) lines = inp.splitlines() - lt = "{color}{pre}{no_color} {line}\n" - s += lt.format(color=color, no_color=NO_COLOR_S, line=lines[0], pre=">>>") + lt = '{color}{pre}{no_color} {line}\n' + s += lt.format(color=color, no_color=NO_COLOR, line=lines[0], pre='>>>') for line in lines[1:]: - s += lt.format(color=color, no_color=NO_COLOR_S, line=line, pre="...") + s += lt.format(color=color, no_color=NO_COLOR, line=line, pre='...') if not self.verbose: - return s + "\n" - out = xlj["cmds"][0].get("out", "Note: no output stored") - s += out.rstrip() + "\n\n" + return s + '\n' + out = xlj['cmds'][0].get('out', 'Note: no output stored') + s += out.rstrip() + '\n\n' return s def _cmd_out_and_rtn_diff(self, i, j): - s = "" - aout = self.a["cmds"][i].get("out", None) - bout = self.b["cmds"][j].get("out", None) + s = '' + aout = self.a['cmds'][i].get('out', None) + bout = self.b['cmds'][j].get('out', None) if aout is None and bout is None: - # s += 'Note: neither output stored\n' + #s += 'Note: neither output stored\n' pass elif bout is None: - aid = self.a["sessionid"] - s += "Note: only {red}{aid}{no_color} output stored\n".format( - red=RED_S, aid=aid, no_color=NO_COLOR_S - ) + aid = self.a['sessionid'] + s += 'Note: only {red}{aid}{no_color} output stored\n'.format(red=RED, + aid=aid, no_color=NO_COLOR) elif aout is None: - bid = self.b["sessionid"] - s += "Note: only {green}{bid}{no_color} output stored\n".format( - green=GREEN_S, bid=bid, no_color=NO_COLOR_S - ) + bid = self.b['sessionid'] + s += 'Note: only {green}{bid}{no_color} output stored\n'.format(green=GREEN, + bid=bid, no_color=NO_COLOR) elif aout != bout: - s += "Outputs differ\n" + s += 'Outputs differ\n' s += highlighted_ndiff(aout.splitlines(), bout.splitlines()) else: pass - artn = self.a["cmds"][i]["rtn"] - brtn = self.b["cmds"][j]["rtn"] + artn = self.a['cmds'][i]['rtn'] + brtn = self.b['cmds'][j]['rtn'] if artn != brtn: - s += ( - "Return vals {red}{artn}{no_color} & {green}{brtn}{no_color} differ\n" - ).format( - red=RED_S, green=GREEN_S, no_color=NO_COLOR_S, artn=artn, brtn=brtn - ) + s += ('Return vals {red}{artn}{no_color} & {green}{brtn}{no_color} differ\n' + ).format(red=RED, green=GREEN, no_color=NO_COLOR, artn=artn, brtn=brtn) return s def _cmd_replace_diff(self, i, ainp, aid, j, binp, bid): - s = ( - "cmd #{i} in {red}{aid}{no_color} is replaced by \n" - "cmd #{j} in {green}{bid}{no_color}:\n" - ) - s = s.format( - i=i, aid=aid, j=j, bid=bid, red=RED_S, green=GREEN_S, no_color=NO_COLOR_S - ) + s = ('cmd #{i} in {red}{aid}{no_color} is replaced by \n' + 'cmd #{j} in {green}{bid}{no_color}:\n') + s = s.format(i=i, aid=aid, j=j, bid=bid, red=RED, green=GREEN, no_color=NO_COLOR) s += highlighted_ndiff(ainp.splitlines(), binp.splitlines()) if not self.verbose: - return s + "\n" + return s + '\n' s += self._cmd_out_and_rtn_diff(i, j) - return s + "\n" + return s + 
'\n' def cmdsdiff(self): """Computes the difference of the commands themselves.""" - aid = self.a["sessionid"] - bid = self.b["sessionid"] - ainps = [c["inp"] for c in self.a["cmds"]] - binps = [c["inp"] for c in self.b["cmds"]] + aid = self.a['sessionid'] + bid = self.b['sessionid'] + ainps = [c['inp'] for c in self.a['cmds']] + binps = [c['inp'] for c in self.b['cmds']] sm = self.sm sm.set_seqs(ainps, binps) - s = "" + s = '' for tag, i1, i2, j1, j2 in sm.get_opcodes(): - if tag == REPLACE_S: - zipper = itertools.zip_longest - for i, ainp, j, binp in zipper( - range(i1, i2), ainps[i1:i2], range(j1, j2), binps[j1:j2] - ): + if tag == REPLACE: + for i, ainp, j, binp in zip_longest(range(i1, i2), ainps[i1:i2], + range(j1, j2), binps[j1:j2]): if j is None: - s += self._cmd_in_one_diff(ainp, i, self.a, aid, RED_S) + s += self._cmd_in_one_diff(ainp, i, self.a, aid, RED) elif i is None: - s += self._cmd_in_one_diff(binp, j, self.b, bid, GREEN_S) + s += self._cmd_in_one_diff(binp, j, self.b, bid, GREEN) else: self._cmd_replace_diff(i, ainp, aid, j, binp, bid) - elif tag == DELETE_S: + elif tag == DELETE: for i, inp in enumerate(ainps[i1:i2], i1): - s += self._cmd_in_one_diff(inp, i, self.a, aid, RED_S) - elif tag == INSERT_S: + s += self._cmd_in_one_diff(inp, i, self.a, aid, RED) + elif tag == INSERT: for j, inp in enumerate(binps[j1:j2], j1): - s += self._cmd_in_one_diff(inp, j, self.b, bid, GREEN_S) - elif tag == EQUAL_S: - for i, j in zip(range(i1, i2), range(j1, j2)): + s += self._cmd_in_one_diff(inp, j, self.b, bid, GREEN) + elif tag == EQUAL: + for i, j, in zip(range(i1, i2), range(j1, j2)): odiff = self._cmd_out_and_rtn_diff(i, j) if len(odiff) > 0: - h = ( - "cmd #{i} in {red}{aid}{no_color} input is the same as \n" - "cmd #{j} in {green}{bid}{no_color}, but output differs:\n" - ) - s += h.format( - i=i, - aid=aid, - j=j, - bid=bid, - red=RED_S, - green=GREEN_S, - no_color=NO_COLOR_S, - ) - s += odiff + "\n" + h = ('cmd #{i} in {red}{aid}{no_color} input is the same as \n' + 'cmd #{j} in {green}{bid}{no_color}, but output differs:\n') + s += h.format(i=i, aid=aid, j=j, bid=bid, red=RED, green=GREEN, + no_color=NO_COLOR) + s += odiff + '\n' else: - raise RuntimeError("tag not understood") + raise RuntimeError('tag not understood') if len(s) == 0: return s - return "Commands\n--------\n" + s + return 'Commands\n--------\n' + s def format(self): """Formats the difference between the two history files.""" s = self.header() ed = self.envdiff() if len(ed) > 0: - s += "\n\n" + ed + s += '\n\n' + ed cd = self.cmdsdiff() if len(cd) > 0: - s += "\n\n" + cd + s += '\n\n' + cd return s.rstrip() _HD_PARSER = None - -def dh_create_parser(p=None): +def _create_parser(p=None): global _HD_PARSER - p_was_none = p is None + p_was_none = (p is None) if _HD_PARSER is not None and p_was_none: return _HD_PARSER if p_was_none: - p = argparse.ArgumentParser( - "diff-history", description="diffs two xonsh history files" - ) - p.add_argument( - "--reopen", - dest="reopen", - default=False, - action="store_true", - help="make lazy file loading reopen files each time", - ) - p.add_argument( - "-v", - "--verbose", - dest="verbose", - default=False, - action="store_true", - help="whether to print even more information", - ) - p.add_argument("a", help="first file in diff") - p.add_argument("b", help="second file in diff") + from argparse import ArgumentParser + p = ArgumentParser('diff-history', description='diffs two xonsh history files') + p.add_argument('--reopen', dest='reopen', default=False, action='store_true', 
+ help='make lazy file loading reopen files each time') + p.add_argument('-v', '--verbose', dest='verbose', default=False, action='store_true', + help='whether to print even more information') + p.add_argument('a', help='first file in diff') + p.add_argument('b', help='second file in diff') if p_was_none: _HD_PARSER = p return p -def dh_main_action(ns, hist=None, stdout=None, stderr=None): +def _main_action(ns, hist=None): hd = HistoryDiffer(ns.a, ns.b, reopen=ns.reopen, verbose=ns.verbose) - print_color(hd.format(), file=stdout) + print(hd.format()) + + +def main(args=None, stdin=None): + """Main entry point for history diff'ing""" + parser = _create_parser() + ns = parser.parse_args(args) + _main_action(ns) + + +if __name__ == '__main__': + main() diff --git a/xonsh/dirstack.py b/xonsh/dirstack.py index 7c8de2c..6e65a37 100644 --- a/xonsh/dirstack.py +++ b/xonsh/dirstack.py @@ -1,140 +1,12 @@ -# -*- coding: utf-8 -*- -"""Directory stack and associated utilities for the xonsh shell.""" +"""Directory stack and associated utilities for the xonsh shell. +""" import os -import glob -import argparse import builtins -import subprocess - -from xonsh.lazyasd import lazyobject -from xonsh.tools import get_sep -from xonsh.events import events -from xonsh.platform import ON_WINDOWS +from glob import iglob +from argparse import ArgumentParser DIRSTACK = [] """A list containing the currently remembered directories.""" -_unc_tempDrives = {} -""" drive: sharePath for temp drive letters we create for UNC mapping""" - - -def _unc_check_enabled() -> bool: - r"""Check whether CMD.EXE is enforcing no-UNC-as-working-directory check. - - Check can be disabled by setting {HKCU, HKLM}/SOFTWARE\Microsoft\Command Processor\DisableUNCCheck:REG_DWORD=1 - - Returns: - True if `CMD.EXE` is enforcing the check (default Windows situation) - False if check is explicitly disabled. - """ - if not ON_WINDOWS: - return - - import winreg - - wval = None - - try: - key = winreg.OpenKey( - winreg.HKEY_CURRENT_USER, r"software\microsoft\command processor" - ) - wval, wtype = winreg.QueryValueEx(key, "DisableUNCCheck") - winreg.CloseKey(key) - except OSError: - pass - - if wval is None: - try: - key2 = winreg.OpenKey( - winreg.HKEY_LOCAL_MACHINE, r"software\microsoft\command processor" - ) - wval, wtype = winreg.QueryValueEx(key2, "DisableUNCCheck") - winreg.CloseKey(key2) - except OSError as e: # NOQA - pass - - return False if wval else True - - -def _is_unc_path(some_path) -> bool: - """True if path starts with 2 backward (or forward, due to python path hacking) slashes.""" - return ( - len(some_path) > 1 - and some_path[0] == some_path[1] - and some_path[0] in (os.sep, os.altsep) - ) - - -def _unc_map_temp_drive(unc_path) -> str: - r"""Map a new temporary drive letter for each distinct share, - unless `CMD.EXE` is not insisting on non-UNC working directory. - - Emulating behavior of `CMD.EXE` `pushd`, create a new mapped drive (starting from Z: towards A:, skipping existing - drive letters) for each new UNC path user selects. - - Args: - unc_path: the path specified by user. Assumed to be a UNC path of form \\\share... - - Returns: - a replacement for `unc_path` to be used as the actual new working directory. - Note that the drive letter may be a the same as one already mapped if the server and share portion of `unc_path` - is the same as one still active on the stack. 
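# A minimal, self-contained sketch of the difflib opcode dispatch that
# cmdsdiff() above is built around, assuming plain lists of command
# strings; the names below are illustrative, not xonsh API.
import difflib

def sketch_cmds_diff(a_cmds, b_cmds):
    sm = difflib.SequenceMatcher(autojunk=False)
    sm.set_seqs(a_cmds, b_cmds)
    out = []
    for tag, i1, i2, j1, j2 in sm.get_opcodes():
        if tag == 'replace':    # inputs differ on both sides
            out.append('replaced: %r -> %r' % (a_cmds[i1:i2], b_cmds[j1:j2]))
        elif tag == 'delete':   # commands only in the first session
            out.append('only in a: %r' % (a_cmds[i1:i2],))
        elif tag == 'insert':   # commands only in the second session
            out.append('only in b: %r' % (b_cmds[j1:j2],))
        elif tag == 'equal':    # same input; cmdsdiff() still diffs the output
            out.append('same: %r' % (a_cmds[i1:i2],))
    return '\n'.join(out)

print(sketch_cmds_diff(['ls', 'cd /tmp'], ['ls', 'cd /home']))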
- """ - global _unc_tempDrives - assert unc_path[1] in (os.sep, os.altsep), "unc_path is UNC form of path" - - if not _unc_check_enabled(): - return unc_path - else: - unc_share, rem_path = os.path.splitdrive(unc_path) - unc_share = unc_share.casefold() - for d in _unc_tempDrives: - if _unc_tempDrives[d] == unc_share: - return os.path.join(d, rem_path) - - for dord in range(ord("z"), ord("a"), -1): - d = chr(dord) + ":" - if not os.path.isdir(d): # find unused drive letter starting from z: - subprocess.check_output( - ["NET", "USE", d, unc_share], universal_newlines=True - ) - _unc_tempDrives[d] = unc_share - return os.path.join(d, rem_path) - - -def _unc_unmap_temp_drive(left_drive, cwd): - """Unmap a temporary drive letter if it is no longer needed. - Called after popping `DIRSTACK` and changing to new working directory, so we need stack *and* - new current working directory to be sure drive letter no longer needed. - - Args: - left_drive: driveletter (and colon) of working directory we just left - cwd: full path of new current working directory - """ - - global _unc_tempDrives - - if left_drive not in _unc_tempDrives: # if not one we've mapped, don't unmap it - return - - for p in DIRSTACK + [cwd]: # if still in use , don't unmap it. - if p.casefold().startswith(left_drive): - return - - _unc_tempDrives.pop(left_drive) - subprocess.check_output( - ["NET", "USE", left_drive, "/delete"], universal_newlines=True - ) - - -events.doc( - "on_chdir", - """ -on_chdir(olddir: str, newdir: str) -> None - -Fires when the current directory is changed for any reason. -""", -) - def _get_cwd(): try: @@ -143,31 +15,18 @@ def _get_cwd(): return None -def _change_working_directory(newdir, follow_symlinks=False): - env = builtins.__xonsh__.env - old = env["PWD"] - new = os.path.join(old, newdir) - absnew = os.path.abspath(new) - - if follow_symlinks: - absnew = os.path.realpath(absnew) - +def _change_working_directory(newdir): + env = builtins.__xonsh_env__ + old = _get_cwd() try: - os.chdir(absnew) + os.chdir(newdir) except (OSError, FileNotFoundError): - if new.endswith(get_sep()): - new = new[:-1] - if os.path.basename(new) == "..": - env["PWD"] = new - else: - if old is not None: - env["OLDPWD"] = old - if new is not None: - env["PWD"] = absnew - - # Fire event if the path actually changed - if old != env["PWD"]: - events.on_chdir.fire(olddir=old, newdir=env["PWD"]) + return + new = _get_cwd() + if old is not None: + env['OLDPWD'] = old + if new is not None: + env['PWD'] = new def _try_cdpath(apath): @@ -175,15 +34,14 @@ def _try_cdpath(apath): # In bash if a CDPATH is set, an unqualified local folder # is considered after all CDPATHs, example: # CDPATH=$HOME/src (with src/xonsh/ inside) - # $ cd xonsh -> src/xonsh (with xonsh/xonsh) + # $ cd xonsh -> src/xonsh (whith xonsh/xonsh) # a second $ cd xonsh has no effects, to move in the nested xonsh # in bash a full $ cd ./xonsh is needed. - # In xonsh a relative folder is always preferred. - env = builtins.__xonsh__.env - cdpaths = env.get("CDPATH") + # In xonsh a relative folder is allways preferred. + env = builtins.__xonsh_env__ + cdpaths = env.get('CDPATH') for cdp in cdpaths: - globber = builtins.__xonsh__.expand_path(os.path.join(cdp, apath)) - for cdpath_prefixed_path in glob.iglob(globber): + for cdpath_prefixed_path in iglob(os.path.join(cdp, apath)): return cdpath_prefixed_path return apath @@ -194,151 +52,95 @@ def cd(args, stdin=None): If no directory is specified (i.e. 
if `args` is None) then this changes to the current user's home directory. """ - env = builtins.__xonsh__.env - oldpwd = env.get("OLDPWD", None) - cwd = env["PWD"] - - follow_symlinks = False - if len(args) > 0 and args[0] == "-P": - follow_symlinks = True - del args[0] + env = builtins.__xonsh_env__ + oldpwd = env.get('OLDPWD', None) + cwd = _get_cwd() if len(args) == 0: - d = os.path.expanduser("~") + d = os.path.expanduser('~') elif len(args) == 1: d = os.path.expanduser(args[0]) if not os.path.isdir(d): - if d == "-": + if d == '-': if oldpwd is not None: d = oldpwd else: - return "", "cd: no previous directory stored\n", 1 - elif d.startswith("-"): + return '', 'cd: no previous directory stored\n' + elif d.startswith('-'): try: num = int(d[1:]) except ValueError: - return "", "cd: Invalid destination: {0}\n".format(d), 1 + return '', 'cd: Invalid destination: {0}\n'.format(d) if num == 0: - return None, None, 0 + return elif num < 0: - return "", "cd: Invalid destination: {0}\n".format(d), 1 + return '', 'cd: Invalid destination: {0}\n'.format(d) elif num > len(DIRSTACK): - e = "cd: Too few elements in dirstack ({0} elements)\n" - return "", e.format(len(DIRSTACK)), 1 + e = 'cd: Too few elements in dirstack ({0} elements)\n' + return '', e.format(len(DIRSTACK)) else: d = DIRSTACK[num - 1] else: d = _try_cdpath(d) else: - return ( - "", - ( - "cd takes 0 or 1 arguments, not {0}. An additional `-P` " - "flag can be passed in first position to follow symlinks." - "\n".format(len(args)) - ), - 1, - ) + return '', 'cd takes 0 or 1 arguments, not {0}\n'.format(len(args)) if not os.path.exists(d): - return "", "cd: no such file or directory: {0}\n".format(d), 1 + return '', 'cd: no such file or directory: {0}\n'.format(d) if not os.path.isdir(d): - return "", "cd: {0} is not a directory\n".format(d), 1 - if not os.access(d, os.X_OK): - return "", "cd: permission denied: {0}\n".format(d), 1 - if ( - ON_WINDOWS - and _is_unc_path(d) - and _unc_check_enabled() - and (not env.get("AUTO_PUSHD")) - ): - return ( - "", - "cd: can't cd to UNC path on Windows, unless $AUTO_PUSHD set or reg entry " - + r"HKCU\SOFTWARE\MICROSOFT\Command Processor\DisableUNCCheck:DWORD = 1" - + "\n", - 1, - ) - + return '', 'cd: {0} is not a directory\n'.format(d) # now, push the directory onto the dirstack if AUTO_PUSHD is set - if cwd is not None and env.get("AUTO_PUSHD"): - pushd(["-n", "-q", cwd]) - if ON_WINDOWS and _is_unc_path(d): - d = _unc_map_temp_drive(d) - _change_working_directory(d, follow_symlinks) - return None, None, 0 - - -@lazyobject -def pushd_parser(): - parser = argparse.ArgumentParser(prog="pushd") - parser.add_argument("dir", nargs="?") - parser.add_argument( - "-n", - dest="cd", - help="Suppresses the normal change of directory when" - " adding directories to the stack, so that only the" - " stack is manipulated.", - action="store_false", - ) - parser.add_argument( - "-q", - dest="quiet", - help="Do not call dirs, regardless of $PUSHD_SILENT", - action="store_true", - ) - return parser + if cwd is not None and env.get('AUTO_PUSHD'): + pushd(['-n', '-q', cwd]) + _change_working_directory(os.path.abspath(d)) + return None, None def pushd(args, stdin=None): - r"""xonsh command: pushd + """xonsh command: pushd Adds a directory to the top of the directory stack, or rotates the stack, making the new top of the stack the current working directory. 
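# A sketch of the "-", "-0", and "-N" destinations handled by cd()
# above, using a stand-in DIRSTACK list; error handling is reduced to
# exceptions for brevity.
import os

DIRSTACK = ['/tmp', '/var/log']

def resolve_cd_target(arg, oldpwd=None):
    if arg == '-':
        if oldpwd is None:
            raise ValueError('cd: no previous directory stored')
        return oldpwd                   # "-" means $OLDPWD
    if arg.startswith('-'):
        num = int(arg[1:])              # ValueError -> invalid destination
        if num == 0:
            return os.getcwd()          # "-0" leaves the directory unchanged
        if num < 0 or num > len(DIRSTACK):
            raise ValueError('cd: Invalid destination: %s' % arg)
        return DIRSTACK[num - 1]        # the stack is 1-indexed from the top
    return os.path.expanduser(arg)

print(resolve_cd_target('-2'))          # -> '/var/log'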
- - On Windows, if the path is a UNC path (begins with `\\\`) and if the `DisableUNCCheck` registry - value is not enabled, creates a temporary mapped drive letter and sets the working directory there, emulating - behavior of `PUSHD` in `CMD.EXE` """ global DIRSTACK try: args = pushd_parser.parse_args(args) except SystemExit: - return None, None, 1 + return None, None - env = builtins.__xonsh__.env + env = builtins.__xonsh_env__ - pwd = env["PWD"] + pwd = env['PWD'] - if env.get("PUSHD_MINUS", False): - BACKWARD = "-" - FORWARD = "+" + if env.get('PUSHD_MINUS', False): + BACKWARD = '-' + FORWARD = '+' else: - BACKWARD = "+" - FORWARD = "-" + BACKWARD = '+' + FORWARD = '-' if args.dir is None: try: new_pwd = DIRSTACK.pop(0) except IndexError: - e = "pushd: Directory stack is empty\n" - return None, e, 1 + e = 'pushd: Directory stack is empty\n' + return None, e elif os.path.isdir(args.dir): new_pwd = args.dir else: try: num = int(args.dir[1:]) except ValueError: - e = "Invalid argument to pushd: {0}\n" - return None, e.format(args.dir), 1 + e = 'Invalid argument to pushd: {0}\n' + return None, e.format(args.dir) if num < 0: - e = "Invalid argument to pushd: {0}\n" - return None, e.format(args.dir), 1 + e = 'Invalid argument to pushd: {0}\n' + return None, e.format(args.dir) if num > len(DIRSTACK): - e = "Too few elements in dirstack ({0} elements)\n" - return None, e.format(len(DIRSTACK)), 1 + e = 'Too few elements in dirstack ({0} elements)\n' + return None, e.format(len(DIRSTACK)) elif args.dir.startswith(FORWARD): if num == len(DIRSTACK): new_pwd = None @@ -350,46 +152,23 @@ def pushd(args, stdin=None): else: new_pwd = DIRSTACK.pop(num - 1) else: - e = "Invalid argument to pushd: {0}\n" - return None, e.format(args.dir), 1 + e = 'Invalid argument to pushd: {0}\n' + return None, e.format(args.dir) if new_pwd is not None: - if ON_WINDOWS and _is_unc_path(new_pwd): - new_pwd = _unc_map_temp_drive(new_pwd) if args.cd: DIRSTACK.insert(0, os.path.expanduser(pwd)) - _change_working_directory(new_pwd) + _change_working_directory(os.path.abspath(new_pwd)) else: - DIRSTACK.insert(0, os.path.expanduser(new_pwd)) + DIRSTACK.insert(0, os.path.expanduser(os.path.abspath(new_pwd))) - maxsize = env.get("DIRSTACK_SIZE") + maxsize = env.get('DIRSTACK_SIZE') if len(DIRSTACK) > maxsize: DIRSTACK = DIRSTACK[:maxsize] - if not args.quiet and not env.get("PUSHD_SILENT"): + if not args.quiet and not env.get('PUSHD_SILENT'): return dirs([], None) - return None, None, 0 - - -@lazyobject -def popd_parser(): - parser = argparse.ArgumentParser(prog="popd") - parser.add_argument("dir", nargs="?") - parser.add_argument( - "-n", - dest="cd", - help="Suppresses the normal change of directory when" - " adding directories to the stack, so that only the" - " stack is manipulated.", - action="store_false", - ) - parser.add_argument( - "-q", - dest="quiet", - help="Do not call dirs, regardless of $PUSHD_SILENT", - action="store_true", - ) - return parser + return None, None def popd(args, stdin=None): @@ -403,37 +182,37 @@ def popd(args, stdin=None): try: args = pushd_parser.parse_args(args) except SystemExit: - return None, None, 1 + return None, None - env = builtins.__xonsh__.env + env = builtins.__xonsh_env__ - if env.get("PUSHD_MINUS"): - BACKWARD = "-" - FORWARD = "+" + if env.get('PUSHD_MINUS'): + BACKWARD = '-' + FORWARD = '+' else: - BACKWARD = "-" - FORWARD = "+" + BACKWARD = '-' + FORWARD = '+' if args.dir is None: try: new_pwd = DIRSTACK.pop(0) except IndexError: - e = "popd: Directory stack is empty\n" - return 
None, e, 1 + e = 'popd: Directory stack is empty\n' + return None, e else: try: num = int(args.dir[1:]) except ValueError: - e = "Invalid argument to popd: {0}\n" - return None, e.format(args.dir), 1 + e = 'Invalid argument to popd: {0}\n' + return None, e.format(args.dir) if num < 0: - e = "Invalid argument to popd: {0}\n" - return None, e.format(args.dir), 1 + e = 'Invalid argument to popd: {0}\n' + return None, e.format(args.dir) if num > len(DIRSTACK): - e = "Too few elements in dirstack ({0} elements)\n" - return None, e.format(len(DIRSTACK)), 1 + e = 'Too few elements in dirstack ({0} elements)\n' + return None, e.format(len(DIRSTACK)) elif args.dir.startswith(FORWARD): if num == len(DIRSTACK): new_pwd = DIRSTACK.pop(0) @@ -447,59 +226,18 @@ def popd(args, stdin=None): new_pwd = None DIRSTACK.pop(num - 1) else: - e = "Invalid argument to popd: {0}\n" - return None, e.format(args.dir), 1 + e = 'Invalid argument to popd: {0}\n' + return None, e.format(args.dir) if new_pwd is not None: e = None if args.cd: - env = builtins.__xonsh__.env - pwd = env["PWD"] - - _change_working_directory(new_pwd) + _change_working_directory(os.path.abspath(new_pwd)) - if ON_WINDOWS: - drive, rem_path = os.path.splitdrive(pwd) - _unc_unmap_temp_drive(drive.casefold(), new_pwd) - - if not args.quiet and not env.get("PUSHD_SILENT"): + if not args.quiet and not env.get('PUSHD_SILENT'): return dirs([], None) - return None, None, 0 - - -@lazyobject -def dirs_parser(): - parser = argparse.ArgumentParser(prog="dirs") - parser.add_argument("N", nargs="?") - parser.add_argument( - "-c", - dest="clear", - help="Clears the directory stack by deleting all of" " the entries.", - action="store_true", - ) - parser.add_argument( - "-p", - dest="print_long", - help="Print the directory stack with one entry per" " line.", - action="store_true", - ) - parser.add_argument( - "-v", - dest="verbose", - help="Print the directory stack with one entry per" - " line, prefixing each entry with its index in the" - " stack.", - action="store_true", - ) - parser.add_argument( - "-l", - dest="long", - help="Produces a longer listing; the default listing" - " format uses a tilde to denote the home directory.", - action="store_true", - ) - return parser + return None, None def dirs(args, stdin=None): @@ -509,67 +247,119 @@ def dirs(args, stdin=None): to clear the directory stack. 
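# A loose sketch (deliberately ignoring the edge cases pushd/popd above
# handle) of the one thing $PUSHD_MINUS controls: whether "+N" or "-N"
# counts from the top of the directory stack.
def rotate(stack, arg, pushd_minus=False):
    top_sign = '-' if pushd_minus else '+'
    num = int(arg[1:])
    idx = num if arg.startswith(top_sign) else len(stack) - 1 - num
    return stack[idx]

print(rotate(['/a', '/b', '/c'], '+0'))                     # '/a'
print(rotate(['/a', '/b', '/c'], '+0', pushd_minus=True))   # '/c'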
""" global DIRSTACK + dirstack = [os.path.expanduser(builtins.__xonsh_env__['PWD'])] + DIRSTACK + try: args = dirs_parser.parse_args(args) except SystemExit: return None, None - env = builtins.__xonsh__.env - dirstack = [os.path.expanduser(env["PWD"])] + DIRSTACK + env = builtins.__xonsh_env__ - if env.get("PUSHD_MINUS"): - BACKWARD = "-" - FORWARD = "+" + if env.get('PUSHD_MINUS'): + BACKWARD = '-' + FORWARD = '+' else: - BACKWARD = "-" - FORWARD = "+" + BACKWARD = '-' + FORWARD = '+' if args.clear: - DIRSTACK = [] - return None, None, 0 + dirstack = [] + return None, None if args.long: o = dirstack else: - d = os.path.expanduser("~") - o = [i.replace(d, "~") for i in dirstack] + d = os.path.expanduser('~') + o = [i.replace(d, '~') for i in dirstack] if args.verbose: - out = "" + out = '' pad = len(str(len(o) - 1)) for (ix, e) in enumerate(o): - blanks = " " * (pad - len(str(ix))) - out += "\n{0}{1} {2}".format(blanks, ix, e) + blanks = ' ' * (pad - len(str(ix))) + out += '\n{0}{1} {2}'.format(blanks, ix, e) out = out[1:] elif args.print_long: - out = "\n".join(o) + out = '\n'.join(o) else: - out = " ".join(o) + out = ' '.join(o) N = args.N if N is not None: try: num = int(N[1:]) except ValueError: - e = "Invalid argument to dirs: {0}\n" - return None, e.format(N), 1 + e = 'Invalid argument to dirs: {0}\n' + return None, e.format(N) if num < 0: - e = "Invalid argument to dirs: {0}\n" - return None, e.format(len(o)), 1 + e = 'Invalid argument to dirs: {0}\n' + return None, e.format(len(o)) if num >= len(o): - e = "Too few elements in dirstack ({0} elements)\n" - return None, e.format(len(o)), 1 + e = 'Too few elements in dirstack ({0} elements)\n' + return None, e.format(len(o)) if N.startswith(BACKWARD): idx = num elif N.startswith(FORWARD): idx = len(o) - 1 - num else: - e = "Invalid argument to dirs: {0}\n" - return None, e.format(N), 1 + e = 'Invalid argument to dirs: {0}\n' + return None, e.format(N) out = o[idx] - return out + "\n", None, 0 + return out + '\n', None + + +pushd_parser = ArgumentParser(prog="pushd") +pushd_parser.add_argument('dir', nargs='?') +pushd_parser.add_argument('-n', + dest='cd', + help='Suppresses the normal change of directory when' + ' adding directories to the stack, so that only the' + ' stack is manipulated.', + action='store_false') +pushd_parser.add_argument('-q', + dest='quiet', + help='Do not call dirs, regardless of $PUSHD_SILENT', + action='store_true') + +popd_parser = ArgumentParser(prog="popd") +popd_parser.add_argument('dir', nargs='?') +popd_parser.add_argument('-n', + dest='cd', + help='Suppresses the normal change of directory when' + ' adding directories to the stack, so that only the' + ' stack is manipulated.', + action='store_false') +popd_parser.add_argument('-q', + dest='quiet', + help='Do not call dirs, regardless of $PUSHD_SILENT', + action='store_true') + +dirs_parser = ArgumentParser(prog="dirs") +dirs_parser.add_argument('N', nargs='?') +dirs_parser.add_argument('-c', + dest='clear', + help='Clears the directory stack by deleting all of' + ' the entries.', + action='store_true') +dirs_parser.add_argument('-p', + dest='print_long', + help='Print the directory stack with one entry per' + ' line.', + action='store_true') +dirs_parser.add_argument('-v', + dest='verbose', + help='Print the directory stack with one entry per' + ' line, prefixing each entry with its index in the' + ' stack.', + action='store_true') +dirs_parser.add_argument('-l', + dest='long', + help='Produces a longer listing; the default listing' + ' format uses a 
tilde to denote the home directory.', + action='store_true') diff --git a/xonsh/dumb_shell.py b/xonsh/dumb_shell.py deleted file mode 100644 index 9e4f8da..0000000 --- a/xonsh/dumb_shell.py +++ /dev/null @@ -1,12 +0,0 @@ -"""A dumb shell for when $TERM == 'dumb', which usually happens in emacs.""" -import builtins - -from xonsh.readline_shell import ReadlineShell - - -class DumbShell(ReadlineShell): - """A dumb shell for when $TERM == 'dumb', which usually happens in emacs.""" - - def __init__(self, *args, **kwargs): - builtins.__xonsh__.env["XONSH_COLOR_STYLE"] = "emacs" - super().__init__(*args, **kwargs) diff --git a/xonsh/environ.py b/xonsh/environ.py index 0913c70..e1ec72d 100644 --- a/xonsh/environ.py +++ b/xonsh/environ.py @@ -1,596 +1,71 @@ -# -*- coding: utf-8 -*- """Environment for the xonsh shell.""" import os import re -import sys -import pprint -import textwrap +import json +import socket +import string import locale import builtins -import warnings -import contextlib -import collections -import collections.abc as cabc import subprocess +from warnings import warn +from functools import wraps +from collections import MutableMapping, MutableSequence, MutableSet, namedtuple from xonsh import __version__ as XONSH_VERSION -from xonsh.lazyasd import LazyObject, lazyobject -from xonsh.codecache import run_script_with_cache +from xonsh.tools import TERM_COLORS, ON_WINDOWS, ON_MAC, ON_LINUX, ON_ARCH, \ + is_int, always_true, always_false, ensure_string, is_env_path, str_to_env_path, \ + env_path_to_str, is_bool, to_bool, bool_to_str, is_history_tuple, to_history_tuple, \ + history_tuple_to_str, is_float, string_types, is_string, DEFAULT_ENCODING from xonsh.dirstack import _get_cwd -from xonsh.events import events -from xonsh.platform import ( - BASH_COMPLETIONS_DEFAULT, - DEFAULT_ENCODING, - PATH_DEFAULT, - ON_WINDOWS, - ON_LINUX, - os_environ, -) - -from xonsh.style_tools import PTK2_STYLE - -from xonsh.tools import ( - always_true, - always_false, - detype, - ensure_string, - is_env_path, - str_to_env_path, - env_path_to_str, - is_bool, - to_bool, - bool_to_str, - is_history_tuple, - to_history_tuple, - history_tuple_to_str, - is_float, - is_string, - is_string_or_callable, - is_completions_display_value, - to_completions_display_value, - is_string_set, - csv_to_set, - set_to_csv, - is_int, - is_bool_seq, - to_bool_or_int, - bool_or_int_to_str, - csv_to_bool_seq, - bool_seq_to_csv, - DefaultNotGiven, - print_exception, - setup_win_unicode_console, - intensify_colors_on_win_setter, - is_dynamic_cwd_width, - to_dynamic_cwd_tuple, - dynamic_cwd_tuple_to_str, - is_logfile_opt, - to_logfile_opt, - logfile_opt_to_str, - executables_in, - is_nonstring_seq_of_strings, - pathsep_to_upper_seq, - seq_to_upper_pathsep, - print_color, - is_history_backend, - to_itself, - swap_values, - ptk2_color_depth_setter, - is_str_str_dict, - to_str_str_dict, - dict_to_str, -) -from xonsh.ansi_colors import ( - ansi_color_escape_code_to_name, - ansi_reverse_style, - ansi_style_by_name, -) -import xonsh.prompt.base as prompt - - -events.doc( - "on_envvar_new", - """ -on_envvar_new(name: str, value: Any) -> None - -Fires after a new environment variable is created. -Note: Setting envvars inside the handler might -cause a recursion until the limit. -""", -) - - -events.doc( - "on_envvar_change", - """ -on_envvar_change(name: str, oldvalue: Any, newvalue: Any) -> None - -Fires after an environment variable is changed. -Note: Setting envvars inside the handler might -cause a recursion until the limit. 
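# A short sketch of how the event hooks documented above are consumed,
# following the handler pattern used elsewhere in this patch
# (@events.on_pre_spec_run_ls); assumes a running xonsh session where
# these hooks exist.
from xonsh.events import events

@events.on_envvar_change
def _watch_style(name, oldvalue, newvalue, **kwargs):
    # Beware: setting env vars in here can recurse, per the docs above.
    if name == 'XONSH_COLOR_STYLE':
        print('color style: %s -> %s' % (oldvalue, newvalue))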
-""", -) +from xonsh.foreign_shells import DEFAULT_SHELLS, load_foreign_envs - -events.doc( - "on_pre_spec_run_ls", - """ -on_pre_spec_run_ls(spec: xonsh.built_ins.SubprocSpec) -> None - -Fires right before a SubprocSpec.run() is called for the ls -command. -""", -) - - -@lazyobject -def HELP_TEMPLATE(): - return ( - "{{INTENSE_RED}}{envvar}{{NO_COLOR}}:\n\n" - "{{INTENSE_YELLOW}}{docstr}{{NO_COLOR}}\n\n" - "default: {{CYAN}}{default}{{NO_COLOR}}\n" - "configurable: {{CYAN}}{configurable}{{NO_COLOR}}" - ) - - -@lazyobject -def LOCALE_CATS(): - lc = { - "LC_CTYPE": locale.LC_CTYPE, - "LC_COLLATE": locale.LC_COLLATE, - "LC_NUMERIC": locale.LC_NUMERIC, - "LC_MONETARY": locale.LC_MONETARY, - "LC_TIME": locale.LC_TIME, - } - if hasattr(locale, "LC_MESSAGES"): - lc["LC_MESSAGES"] = locale.LC_MESSAGES - return lc +LOCALE_CATS = { + 'LC_CTYPE': locale.LC_CTYPE, + 'LC_COLLATE': locale.LC_COLLATE, + 'LC_NUMERIC': locale.LC_NUMERIC, + 'LC_MONETARY': locale.LC_MONETARY, + 'LC_TIME': locale.LC_TIME, +} +if hasattr(locale, 'LC_MESSAGES'): + LOCALE_CATS['LC_MESSAGES'] = locale.LC_MESSAGES def locale_convert(key): """Creates a converter for a locale key.""" - def lc_converter(val): try: locale.setlocale(LOCALE_CATS[key], val) val = locale.setlocale(LOCALE_CATS[key]) except (locale.Error, KeyError): - msg = "Failed to set locale {0!r} to {1!r}".format(key, val) - warnings.warn(msg, RuntimeWarning) + warn('Failed to set locale {0!r} to {1!r}'.format(key, val), RuntimeWarning) return val - return lc_converter - -def to_debug(x): - """Converts value using to_bool_or_int() and sets this value on as the - execer's debug level. - """ - val = to_bool_or_int(x) - if ( - hasattr(builtins, "__xonsh__") - and hasattr(builtins.__xonsh__, "execer") - and builtins.__xonsh__.execer is not None - ): - builtins.__xonsh__.execer.debug_level = val - return val - - -# -# $LS_COLORS tools -# - - -class LsColors(cabc.MutableMapping): - """Helps convert to/from $LS_COLORS format, respecting the xonsh color style. - This accepts the same inputs as dict(). 
- """ - - default_settings = { - "*.7z": ("BOLD_RED",), - "*.Z": ("BOLD_RED",), - "*.aac": ("CYAN",), - "*.ace": ("BOLD_RED",), - "*.alz": ("BOLD_RED",), - "*.arc": ("BOLD_RED",), - "*.arj": ("BOLD_RED",), - "*.asf": ("BOLD_PURPLE",), - "*.au": ("CYAN",), - "*.avi": ("BOLD_PURPLE",), - "*.bmp": ("BOLD_PURPLE",), - "*.bz": ("BOLD_RED",), - "*.bz2": ("BOLD_RED",), - "*.cab": ("BOLD_RED",), - "*.cgm": ("BOLD_PURPLE",), - "*.cpio": ("BOLD_RED",), - "*.deb": ("BOLD_RED",), - "*.dl": ("BOLD_PURPLE",), - "*.dwm": ("BOLD_RED",), - "*.dz": ("BOLD_RED",), - "*.ear": ("BOLD_RED",), - "*.emf": ("BOLD_PURPLE",), - "*.esd": ("BOLD_RED",), - "*.flac": ("CYAN",), - "*.flc": ("BOLD_PURPLE",), - "*.fli": ("BOLD_PURPLE",), - "*.flv": ("BOLD_PURPLE",), - "*.gif": ("BOLD_PURPLE",), - "*.gl": ("BOLD_PURPLE",), - "*.gz": ("BOLD_RED",), - "*.jar": ("BOLD_RED",), - "*.jpeg": ("BOLD_PURPLE",), - "*.jpg": ("BOLD_PURPLE",), - "*.lha": ("BOLD_RED",), - "*.lrz": ("BOLD_RED",), - "*.lz": ("BOLD_RED",), - "*.lz4": ("BOLD_RED",), - "*.lzh": ("BOLD_RED",), - "*.lzma": ("BOLD_RED",), - "*.lzo": ("BOLD_RED",), - "*.m2v": ("BOLD_PURPLE",), - "*.m4a": ("CYAN",), - "*.m4v": ("BOLD_PURPLE",), - "*.mid": ("CYAN",), - "*.midi": ("CYAN",), - "*.mjpeg": ("BOLD_PURPLE",), - "*.mjpg": ("BOLD_PURPLE",), - "*.mka": ("CYAN",), - "*.mkv": ("BOLD_PURPLE",), - "*.mng": ("BOLD_PURPLE",), - "*.mov": ("BOLD_PURPLE",), - "*.mp3": ("CYAN",), - "*.mp4": ("BOLD_PURPLE",), - "*.mp4v": ("BOLD_PURPLE",), - "*.mpc": ("CYAN",), - "*.mpeg": ("BOLD_PURPLE",), - "*.mpg": ("BOLD_PURPLE",), - "*.nuv": ("BOLD_PURPLE",), - "*.oga": ("CYAN",), - "*.ogg": ("CYAN",), - "*.ogm": ("BOLD_PURPLE",), - "*.ogv": ("BOLD_PURPLE",), - "*.ogx": ("BOLD_PURPLE",), - "*.opus": ("CYAN",), - "*.pbm": ("BOLD_PURPLE",), - "*.pcx": ("BOLD_PURPLE",), - "*.pgm": ("BOLD_PURPLE",), - "*.png": ("BOLD_PURPLE",), - "*.ppm": ("BOLD_PURPLE",), - "*.qt": ("BOLD_PURPLE",), - "*.ra": ("CYAN",), - "*.rar": ("BOLD_RED",), - "*.rm": ("BOLD_PURPLE",), - "*.rmvb": ("BOLD_PURPLE",), - "*.rpm": ("BOLD_RED",), - "*.rz": ("BOLD_RED",), - "*.sar": ("BOLD_RED",), - "*.spx": ("CYAN",), - "*.svg": ("BOLD_PURPLE",), - "*.svgz": ("BOLD_PURPLE",), - "*.swm": ("BOLD_RED",), - "*.t7z": ("BOLD_RED",), - "*.tar": ("BOLD_RED",), - "*.taz": ("BOLD_RED",), - "*.tbz": ("BOLD_RED",), - "*.tbz2": ("BOLD_RED",), - "*.tga": ("BOLD_PURPLE",), - "*.tgz": ("BOLD_RED",), - "*.tif": ("BOLD_PURPLE",), - "*.tiff": ("BOLD_PURPLE",), - "*.tlz": ("BOLD_RED",), - "*.txz": ("BOLD_RED",), - "*.tz": ("BOLD_RED",), - "*.tzo": ("BOLD_RED",), - "*.tzst": ("BOLD_RED",), - "*.vob": ("BOLD_PURPLE",), - "*.war": ("BOLD_RED",), - "*.wav": ("CYAN",), - "*.webm": ("BOLD_PURPLE",), - "*.wim": ("BOLD_RED",), - "*.wmv": ("BOLD_PURPLE",), - "*.xbm": ("BOLD_PURPLE",), - "*.xcf": ("BOLD_PURPLE",), - "*.xpm": ("BOLD_PURPLE",), - "*.xspf": ("CYAN",), - "*.xwd": ("BOLD_PURPLE",), - "*.xz": ("BOLD_RED",), - "*.yuv": ("BOLD_PURPLE",), - "*.z": ("BOLD_RED",), - "*.zip": ("BOLD_RED",), - "*.zoo": ("BOLD_RED",), - "*.zst": ("BOLD_RED",), - "bd": ("BACKGROUND_BLACK", "YELLOW"), - "ca": ("BLACK", "BACKGROUND_RED"), - "cd": ("BACKGROUND_BLACK", "YELLOW"), - "di": ("BOLD_BLUE",), - "do": ("BOLD_PURPLE",), - "ex": ("BOLD_GREEN",), - "ln": ("BOLD_CYAN",), - "mh": ("NO_COLOR",), - "mi": ("NO_COLOR",), - "or": ("BACKGROUND_BLACK", "RED"), - "ow": ("BLUE", "BACKGROUND_GREEN"), - "pi": ("BACKGROUND_BLACK", "YELLOW"), - "rs": ("NO_COLOR",), - "sg": ("BLACK", "BACKGROUND_YELLOW"), - "so": ("BOLD_PURPLE",), - "st": ("WHITE", "BACKGROUND_BLUE"), - "su": ("WHITE", 
"BACKGROUND_RED"), - "tw": ("BLACK", "BACKGROUND_GREEN"), - } - - def __init__(self, *args, **kwargs): - self._d = dict(*args, **kwargs) - self._style = self._style_name = None - self._detyped = None - - def __getitem__(self, key): - return self._d[key] - - def __setitem__(self, key, value): - self._detyped = None - self._d[key] = value - - def __delitem__(self, key): - self._detyped = None - del self._d[key] - - def __len__(self): - return len(self._d) - - def __iter__(self): - yield from self._d - - def __str__(self): - return str(self._d) - - def __repr__(self): - return "{0}.{1}(...)".format( - self.__class__.__module__, self.__class__.__name__, self._d - ) - - def _repr_pretty_(self, p, cycle): - name = "{0}.{1}".format(self.__class__.__module__, self.__class__.__name__) - with p.group(0, name + "(", ")"): - if cycle: - p.text("...") - elif len(self): - p.break_() - p.pretty(dict(self)) - - def detype(self): - """De-types the instance, allowing it to be exported to the environment.""" - style = self.style - if self._detyped is None: - self._detyped = ":".join( - [ - key + "=" + ";".join([style[v] or "0" for v in val]) - for key, val in sorted(self._d.items()) - ] - ) - return self._detyped - - @property - def style_name(self): - """Current XONSH_COLOR_STYLE value""" - env = builtins.__xonsh__.env - env_style_name = env.get("XONSH_COLOR_STYLE") - if self._style_name is None or self._style_name != env_style_name: - self._style_name = env_style_name - self._style = self._dtyped = None - return self._style_name - - @property - def style(self): - """The ANSI color style for the current XONSH_COLOR_STYLE""" - style_name = self.style_name - if self._style is None: - self._style = ansi_style_by_name(style_name) - self._detyped = None - return self._style - - @classmethod - def fromstring(cls, s): - """Creates a new instance of the LsColors class from a colon-separated - string of dircolor-valid keys to ANSI color escape sequences. - """ - obj = cls() - # string inputs always use default codes, so translating into - # xonsh names should be done from defaults - reversed_default = ansi_reverse_style(style="default") - data = {} - for item in s.split(":"): - key, eq, esc = item.partition("=") - if not eq: - # not a valid item - continue - data[key] = ansi_color_escape_code_to_name( - esc, "default", reversed_style=reversed_default - ) - obj._d = data - return obj - - @classmethod - def fromdircolors(cls, filename=None): - """Constructs an LsColors instance by running dircolors. - If a filename is provided, it is passed down to the dircolors command. 
- """ - # assemble command - cmd = ["dircolors", "-b"] - if filename is not None: - cmd.append(filename) - # get env - if hasattr(builtins, "__xonsh__") and hasattr(builtins.__xonsh__, "env"): - denv = builtins.__xonsh__.env.detype() - else: - denv = None - # run dircolors - try: - out = subprocess.check_output( - cmd, env=denv, universal_newlines=True, stderr=subprocess.DEVNULL - ) - except (subprocess.CalledProcessError, FileNotFoundError): - return cls(cls.default_settings) - s = out.splitlines()[0] - _, _, s = s.partition("'") - s, _, _ = s.rpartition("'") - return cls.fromstring(s) - - @classmethod - def convert(cls, x): - """Converts an object to LsColors, if needed.""" - if isinstance(x, cls): - return x - elif isinstance(x, str): - return cls.fromstring(x) - elif isinstance(x, bytes): - return cls.fromstring(x.decode()) - else: - return cls(x) - - -def is_lscolors(x): - """Checks if an object is an instance of LsColors""" - return isinstance(x, LsColors) - - -@events.on_pre_spec_run_ls -def ensure_ls_colors_in_env(spec=None, **kwargs): - """This ensures that the $LS_COLORS environment variable is in the - environment. This fires exactly once upon the first time the - ls command is called. - """ - env = builtins.__xonsh__.env - if "LS_COLORS" not in env._d: - # this adds it to the env too - default_lscolors(env) - events.on_pre_spec_run_ls.discard(ensure_ls_colors_in_env) - - -# -# Ensurerers -# - -Ensurer = collections.namedtuple("Ensurer", ["validate", "convert", "detype"]) +Ensurer = namedtuple('Ensurer', ['validate', 'convert', 'detype']) Ensurer.__doc__ = """Named tuples whose elements are functions that represent environment variable validation, conversion, detyping. """ - -@lazyobject -def DEFAULT_ENSURERS(): - return { - "AUTO_CD": (is_bool, to_bool, bool_to_str), - "AUTO_PUSHD": (is_bool, to_bool, bool_to_str), - "AUTO_SUGGEST": (is_bool, to_bool, bool_to_str), - "AUTO_SUGGEST_IN_COMPLETIONS": (is_bool, to_bool, bool_to_str), - "BASH_COMPLETIONS": (is_env_path, str_to_env_path, env_path_to_str), - "CASE_SENSITIVE_COMPLETIONS": (is_bool, to_bool, bool_to_str), - re.compile(r"\w*DIRS$"): (is_env_path, str_to_env_path, env_path_to_str), - "COLOR_INPUT": (is_bool, to_bool, bool_to_str), - "COLOR_RESULTS": (is_bool, to_bool, bool_to_str), - "COMPLETIONS_BRACKETS": (is_bool, to_bool, bool_to_str), - "COMPLETIONS_CONFIRM": (is_bool, to_bool, bool_to_str), - "COMPLETIONS_DISPLAY": ( - is_completions_display_value, - to_completions_display_value, - str, - ), - "COMPLETIONS_MENU_ROWS": (is_int, int, str), - "COMPLETION_QUERY_LIMIT": (is_int, int, str), - "DIRSTACK_SIZE": (is_int, int, str), - "DOTGLOB": (is_bool, to_bool, bool_to_str), - "DYNAMIC_CWD_WIDTH": ( - is_dynamic_cwd_width, - to_dynamic_cwd_tuple, - dynamic_cwd_tuple_to_str, - ), - "DYNAMIC_CWD_ELISION_CHAR": (is_string, ensure_string, ensure_string), - "EXPAND_ENV_VARS": (is_bool, to_bool, bool_to_str), - "FORCE_POSIX_PATHS": (is_bool, to_bool, bool_to_str), - "FOREIGN_ALIASES_SUPPRESS_SKIP_MESSAGE": (is_bool, to_bool, bool_to_str), - "FOREIGN_ALIASES_OVERRIDE": (is_bool, to_bool, bool_to_str), - "FUZZY_PATH_COMPLETION": (is_bool, to_bool, bool_to_str), - "GLOB_SORTED": (is_bool, to_bool, bool_to_str), - "HISTCONTROL": (is_string_set, csv_to_set, set_to_csv), - "IGNOREEOF": (is_bool, to_bool, bool_to_str), - "INTENSIFY_COLORS_ON_WIN": ( - always_false, - intensify_colors_on_win_setter, - bool_to_str, - ), - "LANG": (is_string, ensure_string, ensure_string), - "LC_COLLATE": (always_false, locale_convert("LC_COLLATE"), 
ensure_string), - "LC_CTYPE": (always_false, locale_convert("LC_CTYPE"), ensure_string), - "LC_MESSAGES": (always_false, locale_convert("LC_MESSAGES"), ensure_string), - "LC_MONETARY": (always_false, locale_convert("LC_MONETARY"), ensure_string), - "LC_NUMERIC": (always_false, locale_convert("LC_NUMERIC"), ensure_string), - "LC_TIME": (always_false, locale_convert("LC_TIME"), ensure_string), - "LS_COLORS": (is_lscolors, LsColors.convert, detype), - "LOADED_RC_FILES": (is_bool_seq, csv_to_bool_seq, bool_seq_to_csv), - "MOUSE_SUPPORT": (is_bool, to_bool, bool_to_str), - "MULTILINE_PROMPT": (is_string_or_callable, ensure_string, ensure_string), - re.compile(r"\w*PATH$"): (is_env_path, str_to_env_path, env_path_to_str), - "PATHEXT": ( - is_nonstring_seq_of_strings, - pathsep_to_upper_seq, - seq_to_upper_pathsep, - ), - "PRETTY_PRINT_RESULTS": (is_bool, to_bool, bool_to_str), - "PROMPT": (is_string_or_callable, ensure_string, ensure_string), - "PROMPT_FIELDS": (always_true, None, None), - "PROMPT_TOOLKIT_COLOR_DEPTH": ( - always_false, - ptk2_color_depth_setter, - ensure_string, - ), - "PUSHD_MINUS": (is_bool, to_bool, bool_to_str), - "PUSHD_SILENT": (is_bool, to_bool, bool_to_str), - "PTK_STYLE_OVERRIDES": (is_str_str_dict, to_str_str_dict, dict_to_str), - "RAISE_SUBPROC_ERROR": (is_bool, to_bool, bool_to_str), - "RIGHT_PROMPT": (is_string_or_callable, ensure_string, ensure_string), - "BOTTOM_TOOLBAR": (is_string_or_callable, ensure_string, ensure_string), - "SUBSEQUENCE_PATH_COMPLETION": (is_bool, to_bool, bool_to_str), - "SUGGEST_COMMANDS": (is_bool, to_bool, bool_to_str), - "SUGGEST_MAX_NUM": (is_int, int, str), - "SUGGEST_THRESHOLD": (is_int, int, str), - "SUPPRESS_BRANCH_TIMEOUT_MESSAGE": (is_bool, to_bool, bool_to_str), - "UPDATE_COMPLETIONS_ON_KEYPRESS": (is_bool, to_bool, bool_to_str), - "UPDATE_OS_ENVIRON": (is_bool, to_bool, bool_to_str), - "UPDATE_PROMPT_ON_KEYPRESS": (is_bool, to_bool, bool_to_str), - "VC_BRANCH_TIMEOUT": (is_float, float, str), - "VC_HG_SHOW_BRANCH": (is_bool, to_bool, bool_to_str), - "VI_MODE": (is_bool, to_bool, bool_to_str), - "VIRTUAL_ENV": (is_string, ensure_string, ensure_string), - "WIN_UNICODE_CONSOLE": (always_false, setup_win_unicode_console, bool_to_str), - "XONSHRC": (is_env_path, str_to_env_path, env_path_to_str), - "XONSH_APPEND_NEWLINE": (is_bool, to_bool, bool_to_str), - "XONSH_AUTOPAIR": (is_bool, to_bool, bool_to_str), - "XONSH_CACHE_SCRIPTS": (is_bool, to_bool, bool_to_str), - "XONSH_CACHE_EVERYTHING": (is_bool, to_bool, bool_to_str), - "XONSH_COLOR_STYLE": (is_string, ensure_string, ensure_string), - "XONSH_DEBUG": (always_false, to_debug, bool_or_int_to_str), - "XONSH_ENCODING": (is_string, ensure_string, ensure_string), - "XONSH_ENCODING_ERRORS": (is_string, ensure_string, ensure_string), - "XONSH_HISTORY_BACKEND": (is_history_backend, to_itself, ensure_string), - "XONSH_HISTORY_FILE": (is_string, ensure_string, ensure_string), - "XONSH_HISTORY_MATCH_ANYWHERE": (is_bool, to_bool, bool_to_str), - "XONSH_HISTORY_SIZE": ( - is_history_tuple, - to_history_tuple, - history_tuple_to_str, - ), - "XONSH_LOGIN": (is_bool, to_bool, bool_to_str), - "XONSH_PROC_FREQUENCY": (is_float, float, str), - "XONSH_SHOW_TRACEBACK": (is_bool, to_bool, bool_to_str), - "XONSH_STDERR_PREFIX": (is_string, ensure_string, ensure_string), - "XONSH_STDERR_POSTFIX": (is_string, ensure_string, ensure_string), - "XONSH_STORE_STDOUT": (is_bool, to_bool, bool_to_str), - "XONSH_STORE_STDIN": (is_bool, to_bool, bool_to_str), - "XONSH_TRACEBACK_LOGFILE": (is_logfile_opt, 
to_logfile_opt, logfile_opt_to_str), - "XONSH_DATETIME_FORMAT": (is_string, ensure_string, ensure_string), - } - +DEFAULT_ENSURERS = { + 'AUTO_SUGGEST': (is_bool, to_bool, bool_to_str), + 'BASH_COMPLETIONS': (is_env_path, str_to_env_path, env_path_to_str), + 'CASE_SENSITIVE_COMPLETIONS': (is_bool, to_bool, bool_to_str), + re.compile('\w*DIRS'): (is_env_path, str_to_env_path, env_path_to_str), + 'LC_COLLATE': (always_false, locale_convert('LC_COLLATE'), ensure_string), + 'LC_CTYPE': (always_false, locale_convert('LC_CTYPE'), ensure_string), + 'LC_MESSAGES': (always_false, locale_convert('LC_MESSAGES'), ensure_string), + 'LC_MONETARY': (always_false, locale_convert('LC_MONETARY'), ensure_string), + 'LC_NUMERIC': (always_false, locale_convert('LC_NUMERIC'), ensure_string), + 'LC_TIME': (always_false, locale_convert('LC_TIME'), ensure_string), + 'MOUSE_SUPPORT': (is_bool, to_bool, bool_to_str), + re.compile('\w*PATH'): (is_env_path, str_to_env_path, env_path_to_str), + 'TEEPTY_PIPE_DELAY': (is_float, float, str), + 'XONSHRC': (is_env_path, str_to_env_path, env_path_to_str), + 'XONSH_ENCODING': (is_string, ensure_string, ensure_string), + 'XONSH_ENCODING_ERRORS': (is_string, ensure_string, ensure_string), + 'XONSH_HISTORY_SIZE': (is_history_tuple, to_history_tuple, history_tuple_to_str), + 'XONSH_STORE_STDOUT': (is_bool, to_bool, bool_to_str), +} # # Defaults @@ -600,19 +75,25 @@ def default_value(f): f._xonsh_callable_default = True return f - def is_callable_default(x): """Checks if a value is a callable default.""" - return callable(x) and getattr(x, "_xonsh_callable_default", False) - - -DEFAULT_TITLE = "{current_job:{} | }{user}@{hostname}: {cwd} | xonsh" - + return callable(x) and getattr(x, '_xonsh_callable_default', False) + +DEFAULT_PROMPT = ('{BOLD_RED}{user} ' + '{BOLD_WHITE}at ' + '{BOLD_RED}{hostname} ' + '{BOLD_WHITE}in ' + '{BOLD_GREEN}{cwd} ' + '{BOLD_WHITE}on' + '{branch_color}{curr_branch} ' + '{BOLD_WHITE}\n' + '${NO_COLOR} ') +DEFAULT_TITLE = '{user} at {hostname}: {cwd} | xonsh' @default_value def xonsh_data_dir(env): """Ensures and returns the $XONSH_DATA_DIR""" - xdd = os.path.expanduser(os.path.join(env.get("XDG_DATA_HOME"), "xonsh")) + xdd = os.path.join(env.get('XDG_DATA_HOME'), 'xonsh') os.makedirs(xdd, exist_ok=True) return xdd @@ -620,921 +101,210 @@ def xonsh_data_dir(env): @default_value def xonsh_config_dir(env): """Ensures and returns the $XONSH_CONFIG_DIR""" - xcd = os.path.expanduser(os.path.join(env.get("XDG_CONFIG_HOME"), "xonsh")) + xcd = os.path.join(env.get('XDG_CONFIG_HOME'), 'xonsh') os.makedirs(xcd, exist_ok=True) return xcd +@default_value def xonshconfig(env): """Ensures and returns the $XONSHCONFIG""" - xcd = env.get("XONSH_CONFIG_DIR") - xc = os.path.join(xcd, "config.json") + xcd = env.get('XONSH_CONFIG_DIR') + xc = os.path.join(xcd, 'config.json') return xc -@default_value -def default_xonshrc(env): - """Creates a new instance of the default xonshrc tuple.""" - xcdrc = os.path.join(xonsh_config_dir(env), "rc.xsh") - if ON_WINDOWS: - dxrc = ( - os.path.join(os_environ["ALLUSERSPROFILE"], "xonsh", "xonshrc"), - xcdrc, - os.path.expanduser("~/.xonshrc"), - ) - else: - dxrc = ("/etc/xonshrc", xcdrc, os.path.expanduser("~/.xonshrc")) - # Check if old config file exists and issue warning - old_config_filename = xonshconfig(env) - if os.path.isfile(old_config_filename): - print( - "WARNING! old style configuration (" - + old_config_filename - + ") is no longer supported. " - + "Please migrate to xonshrc." 
- ) - return dxrc - - -@default_value -def xonsh_append_newline(env): - """Appends a newline if we are in interactive mode""" - return env.get("XONSH_INTERACTIVE", False) - - -@default_value -def default_lscolors(env): - """Gets a default instanse of LsColors""" - inherited_lscolors = os_environ.get("LS_COLORS", None) - if inherited_lscolors is None: - lsc = LsColors.fromdircolors() - else: - lsc = LsColors.fromstring(inherited_lscolors) - # have to place this in the env, so it is applied - env["LS_COLORS"] = lsc - return lsc - - # Default values should generally be immutable, that way if a user wants # to set them they have to do a copy and write them to the environment. # try to keep this sorted. -@lazyobject -def DEFAULT_VALUES(): - dv = { - "AUTO_CD": False, - "AUTO_PUSHD": False, - "AUTO_SUGGEST": True, - "AUTO_SUGGEST_IN_COMPLETIONS": False, - "BASH_COMPLETIONS": BASH_COMPLETIONS_DEFAULT, - "CASE_SENSITIVE_COMPLETIONS": ON_LINUX, - "CDPATH": (), - "COLOR_INPUT": True, - "COLOR_RESULTS": False, - "COMPLETIONS_BRACKETS": True, - "COMPLETIONS_CONFIRM": False, - "COMPLETIONS_DISPLAY": "single", - "COMPLETIONS_MENU_ROWS": 5, - "COMPLETION_QUERY_LIMIT": 100, - "DIRSTACK_SIZE": 20, - "DOTGLOB": False, - "DYNAMIC_CWD_WIDTH": (float("inf"), "c"), - "DYNAMIC_CWD_ELISION_CHAR": "", - "EXPAND_ENV_VARS": True, - "FORCE_POSIX_PATHS": False, - "FOREIGN_ALIASES_SUPPRESS_SKIP_MESSAGE": False, - "FOREIGN_ALIASES_OVERRIDE": False, - "PROMPT_FIELDS": dict(prompt.PROMPT_FIELDS), - "FUZZY_PATH_COMPLETION": True, - "GLOB_SORTED": True, - "HISTCONTROL": set(), - "IGNOREEOF": False, - "INDENT": " ", - "INTENSIFY_COLORS_ON_WIN": True, - "LANG": "C.UTF-8", - "LC_CTYPE": locale.setlocale(locale.LC_CTYPE), - "LC_COLLATE": locale.setlocale(locale.LC_COLLATE), - "LC_TIME": locale.setlocale(locale.LC_TIME), - "LC_MONETARY": locale.setlocale(locale.LC_MONETARY), - "LC_NUMERIC": locale.setlocale(locale.LC_NUMERIC), - "LS_COLORS": default_lscolors, - "LOADED_RC_FILES": (), - "MOUSE_SUPPORT": False, - "MULTILINE_PROMPT": ".", - "PATH": PATH_DEFAULT, - "PATHEXT": [".COM", ".EXE", ".BAT", ".CMD"] if ON_WINDOWS else [], - "PRETTY_PRINT_RESULTS": True, - "PROMPT": prompt.default_prompt(), - "PROMPT_TOOLKIT_COLOR_DEPTH": "", - "PTK_STYLE_OVERRIDES": dict(PTK2_STYLE), - "PUSHD_MINUS": False, - "PUSHD_SILENT": False, - "RAISE_SUBPROC_ERROR": False, - "RIGHT_PROMPT": "", - "BOTTOM_TOOLBAR": "", - "SHELL_TYPE": "best", - "SUBSEQUENCE_PATH_COMPLETION": True, - "SUPPRESS_BRANCH_TIMEOUT_MESSAGE": False, - "SUGGEST_COMMANDS": True, - "SUGGEST_MAX_NUM": 5, - "SUGGEST_THRESHOLD": 3, - "TITLE": DEFAULT_TITLE, - "UPDATE_COMPLETIONS_ON_KEYPRESS": True, - "UPDATE_OS_ENVIRON": False, - "UPDATE_PROMPT_ON_KEYPRESS": False, - "VC_BRANCH_TIMEOUT": 0.2 if ON_WINDOWS else 0.1, - "VC_HG_SHOW_BRANCH": True, - "VI_MODE": False, - "WIN_UNICODE_CONSOLE": True, - "XDG_CONFIG_HOME": os.path.expanduser(os.path.join("~", ".config")), - "XDG_DATA_HOME": os.path.expanduser(os.path.join("~", ".local", "share")), - "XONSHRC": default_xonshrc, - "XONSH_APPEND_NEWLINE": xonsh_append_newline, - "XONSH_AUTOPAIR": False, - "XONSH_CACHE_SCRIPTS": True, - "XONSH_CACHE_EVERYTHING": False, - "XONSH_COLOR_STYLE": "default", - "XONSH_CONFIG_DIR": xonsh_config_dir, - "XONSH_DATA_DIR": xonsh_data_dir, - "XONSH_DEBUG": 0, - "XONSH_ENCODING": DEFAULT_ENCODING, - "XONSH_ENCODING_ERRORS": "surrogateescape", - "XONSH_HISTORY_BACKEND": "json", - "XONSH_HISTORY_FILE": os.path.expanduser("~/.xonsh_history.json"), - "XONSH_HISTORY_MATCH_ANYWHERE": False, - "XONSH_HISTORY_SIZE": 
(8128, "commands"), - "XONSH_LOGIN": False, - "XONSH_PROC_FREQUENCY": 1e-4, - "XONSH_SHOW_TRACEBACK": False, - "XONSH_STDERR_PREFIX": "", - "XONSH_STDERR_POSTFIX": "", - "XONSH_STORE_STDIN": False, - "XONSH_STORE_STDOUT": False, - "XONSH_TRACEBACK_LOGFILE": None, - "XONSH_DATETIME_FORMAT": "%Y-%m-%d %H:%M", - } - if hasattr(locale, "LC_MESSAGES"): - dv["LC_MESSAGES"] = locale.setlocale(locale.LC_MESSAGES) - return dv - - -VarDocs = collections.namedtuple( - "VarDocs", ["docstr", "configurable", "default", "store_as_str"] -) -VarDocs.__doc__ = """Named tuple for environment variable documentation - -Parameters ----------- -docstr : str - The environment variable docstring. -configurable : bool, optional - Flag for whether the environment variable is configurable or not. -default : str, optional - Custom docstring for the default value for complex defaults. - Is this is DefaultNotGiven, then the default will be looked up - from DEFAULT_VALUES and converted to a str. -store_as_str : bool, optional - Flag for whether the environment variable should be stored as a - string. This is used when persisting a variable that is not JSON - serializable to the config file. For example, sets, frozensets, and - potentially other non-trivial data types. default, False. -""" -# iterates from back -VarDocs.__new__.__defaults__ = (True, DefaultNotGiven, False) - - -# Please keep the following in alphabetic order - scopatz -@lazyobject -def DEFAULT_DOCS(): - return { - "ANSICON": VarDocs( - "This is used on Windows to set the title, " "if available.", - configurable=False, - ), - "AUTO_CD": VarDocs( - "Flag to enable changing to a directory by entering the dirname or " - "full path only (without the cd command)." - ), - "AUTO_PUSHD": VarDocs( - "Flag for automatically pushing directories onto the directory stack." - ), - "AUTO_SUGGEST": VarDocs( - "Enable automatic command suggestions based on history, like in the fish " - "shell.\n\nPressing the right arrow key inserts the currently " - "displayed suggestion. Only usable with ``$SHELL_TYPE=prompt_toolkit.``" - ), - "AUTO_SUGGEST_IN_COMPLETIONS": VarDocs( - "Places the auto-suggest result as the first option in the completions. " - "This enables you to tab complete the auto-suggestion." - ), - "BASH_COMPLETIONS": VarDocs( - "This is a list (or tuple) of strings that specifies where the " - "``bash_completion`` script may be found. " - "The first valid path will be used. For better performance, " - "bash-completion v2.x is recommended since it lazy-loads individual " - "completion scripts. " - "For both bash-completion v1.x and v2.x, paths of individual completion " - "scripts (like ``.../completes/ssh``) do not need to be included here. " - "The default values are platform " - "dependent, but sane. To specify an alternate list, do so in the run " - "control file.", - default=( - "Normally this is:\n\n" - " ``('/usr/share/bash-completion/bash_completion', )``\n\n" - "But, on Mac it is:\n\n" - " ``('/usr/local/share/bash-completion/bash_completion', " - "'/usr/local/etc/bash_completion')``\n\n" - "Other OS-specific defaults may be added in the future." - ), - ), - "CASE_SENSITIVE_COMPLETIONS": VarDocs( - "Sets whether completions should be case sensitive or case " "insensitive.", - default="True on Linux, False otherwise.", - ), - "CDPATH": VarDocs( - "A list of paths to be used as roots for a cd, breaking compatibility " - "with Bash, xonsh always prefer an existing relative path." 
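# A hedged sketch of the protocol behind the Ensurer triples defined
# earlier in this file: validate first, convert only when needed, and
# detype back to a plain string for os.environ.  The to_bool and detype
# stand-ins here only approximate xonsh.tools.
from collections import namedtuple

Ensurer = namedtuple('Ensurer', ['validate', 'convert', 'detype'])

def to_bool(x):
    falses = ('', '0', 'n', 'f', 'no', 'none', 'false')
    return x.lower() not in falses if isinstance(x, str) else bool(x)

ens = Ensurer(validate=lambda x: isinstance(x, bool),
              convert=to_bool,
              detype=lambda x: '1' if x else '')

def env_set(env, name, value, ens):
    if not ens.validate(value):
        value = ens.convert(value)      # e.g. 'true' -> True
    env[name] = value
    return ens.detype(value)            # exported string form

env = {}
print(env_set(env, 'AUTO_PUSHD', 'true', ens), env)  # 1 {'AUTO_PUSHD': True}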
- ), - "COLOR_INPUT": VarDocs("Flag for syntax highlighting interactive input."), - "COLOR_RESULTS": VarDocs("Flag for syntax highlighting return values."), - "COMPLETIONS_BRACKETS": VarDocs( - "Flag to enable/disable inclusion of square brackets and parentheses " - "in Python attribute completions.", - default="True", - ), - "COMPLETIONS_DISPLAY": VarDocs( - "Configure if and how Python completions are displayed by the " - "``prompt_toolkit`` shell.\n\nThis option does not affect Bash " - "completions, auto-suggestions, etc.\n\nChanging it at runtime will " - "take immediate effect, so you can quickly disable and enable " - "completions during shell sessions.\n\n" - "- If ``$COMPLETIONS_DISPLAY`` is ``none`` or ``false``, do not display\n" - " those completions.\n" - "- If ``$COMPLETIONS_DISPLAY`` is ``single``, display completions in a\n" - " single column while typing.\n" - "- If ``$COMPLETIONS_DISPLAY`` is ``multi`` or ``true``, display completions\n" - " in multiple columns while typing.\n\n" - "- If ``$COMPLETIONS_DISPLAY`` is ``readline``, display completions\n" - " will emulate the behavior of readline.\n\n" - "These option values are not case- or type-sensitive, so e.g." - "writing ``$COMPLETIONS_DISPLAY = None`` " - "and ``$COMPLETIONS_DISPLAY = 'none'`` are equivalent. Only usable with " - "``$SHELL_TYPE=prompt_toolkit``" - ), - "COMPLETIONS_CONFIRM": VarDocs( - "While tab-completions menu is displayed, press to confirm " - "completion instead of running command. This only affects the " - "prompt-toolkit shell." - ), - "COMPLETIONS_MENU_ROWS": VarDocs( - "Number of rows to reserve for tab-completions menu if " - "``$COMPLETIONS_DISPLAY`` is ``single`` or ``multi``. This only affects the " - "prompt-toolkit shell." - ), - "COMPLETION_QUERY_LIMIT": VarDocs( - "The number of completions to display before the user is asked " - "for confirmation." - ), - "DIRSTACK_SIZE": VarDocs("Maximum size of the directory stack."), - "DOTGLOB": VarDocs( - 'Globbing files with "*" or "**" will also match ' - "dotfiles, or those 'hidden' files whose names " - "begin with a literal '.'. Such files are filtered " - "out by default." - ), - "DYNAMIC_CWD_WIDTH": VarDocs( - "Maximum length in number of characters " - "or as a percentage for the ``cwd`` prompt variable. For example, " - '"20" is a twenty character width and "10%" is ten percent of the ' - "number of columns available." - ), - "DYNAMIC_CWD_ELISION_CHAR": VarDocs( - "The string used to show a shortened directory in a shortened cwd, " - "e.g. ``'…'``." - ), - "EXPAND_ENV_VARS": VarDocs( - "Toggles whether environment variables are expanded inside of strings " - "in subprocess mode." - ), - "FORCE_POSIX_PATHS": VarDocs( - "Forces forward slashes (``/``) on Windows systems when using auto " - "completion if set to anything truthy.", - configurable=ON_WINDOWS, - ), - "FOREIGN_ALIASES_SUPPRESS_SKIP_MESSAGE": VarDocs( - "Whether or not foreign aliases should suppress the message " - "that informs the user when a foreign alias has been skipped " - "because it already exists in xonsh.", - configurable=True, - ), - "FOREIGN_ALIASES_OVERRIDE": VarDocs( - "Whether or not foreign aliases should override xonsh aliases " - "with the same name. Note that setting of this must happen in the " - "environment that xonsh was started from. 
" - "It cannot be set in the ``.xonshrc`` as loading of foreign aliases happens before" - "``.xonshrc`` is parsed", - configurable=True, - ), - "PROMPT_FIELDS": VarDocs( - "Dictionary containing variables to be used when formatting $PROMPT " - "and $TITLE. See 'Customizing the Prompt' " - "http://xon.sh/tutorial.html#customizing-the-prompt", - configurable=False, - default="``xonsh.prompt.PROMPT_FIELDS``", - ), - "FUZZY_PATH_COMPLETION": VarDocs( - "Toggles 'fuzzy' matching of paths for tab completion, which is only " - "used as a fallback if no other completions succeed but can be used " - "as a way to adjust for typographical errors. If ``True``, then, e.g.," - " ``xonhs`` will match ``xonsh``." - ), - "GLOB_SORTED": VarDocs( - "Toggles whether globbing results are manually sorted. If ``False``, " - "the results are returned in arbitrary order." - ), - "HISTCONTROL": VarDocs( - "A set of strings (comma-separated list in string form) of options " - "that determine what commands are saved to the history list. By " - "default all commands are saved. The option ``ignoredups`` will not " - "save the command if it matches the previous command. The option " - "'ignoreerr' will cause any commands that fail (i.e. return non-zero " - "exit status) to not be added to the history list.", - store_as_str=True, - ), - "IGNOREEOF": VarDocs("Prevents Ctrl-D from exiting the shell."), - "INDENT": VarDocs("Indentation string for multiline input"), - "INTENSIFY_COLORS_ON_WIN": VarDocs( - "Enhance style colors for readability " - "when using the default terminal (``cmd.exe``) on Windows. Blue colors, " - "which are hard to read, are replaced with cyan. Other colors are " - "generally replaced by their bright counter parts.", - configurable=ON_WINDOWS, - ), - "LANG": VarDocs("Fallback locale setting for systems where it matters"), - "LS_COLORS": VarDocs("Color settings for ``ls`` command line utility"), - "LOADED_RC_FILES": VarDocs( - "Whether or not any of the xonsh run control files were loaded at " - "startup. This is a sequence of bools in Python that is converted " - "to a CSV list in string form, ie ``[True, False]`` becomes " - "``'True,False'``.", - configurable=False, - ), - "MOUSE_SUPPORT": VarDocs( - "Enable mouse support in the ``prompt_toolkit`` shell. This allows " - "clicking for positioning the cursor or selecting a completion. In " - "some terminals however, this disables the ability to scroll back " - "through the history of the terminal. Only usable with " - "``$SHELL_TYPE=prompt_toolkit``" - ), - "MULTILINE_PROMPT": VarDocs( - "Prompt text for 2nd+ lines of input, may be str or function which " - "returns a str." - ), - "OLDPWD": VarDocs( - "Used to represent a previous present working directory.", - configurable=False, - ), - "PATH": VarDocs("List of strings representing where to look for executables."), - "PATHEXT": VarDocs( - "Sequence of extension strings (eg, ``.EXE``) for " - "filtering valid executables by. Each element must be " - "uppercase." - ), - "PRETTY_PRINT_RESULTS": VarDocs('Flag for "pretty printing" return values.'), - "PROMPT": VarDocs( - "The prompt text. May contain keyword arguments which are " - "auto-formatted, see 'Customizing the Prompt' at " - "http://xon.sh/tutorial.html#customizing-the-prompt. " - "This value is never inherited from parent processes.", - default="``xonsh.environ.DEFAULT_PROMPT``", - ), - "PROMPT_TOOLKIT_COLOR_DEPTH": VarDocs( - "The color depth used by prompt toolkit 2. 
Possible values are: " - "``DEPTH_1_BIT``, ``DEPTH_4_BIT``, ``DEPTH_8_BIT``, ``DEPTH_24_BIT`` " - "colors. Default is an empty string which means that prompt toolkit decide." - ), - "PTK_STYLE_OVERRIDES": VarDocs( - "A dictionary containing custom prompt_toolkit style definitions." - ), - "PUSHD_MINUS": VarDocs( - "Flag for directory pushing functionality. False is the normal " "behavior." - ), - "PUSHD_SILENT": VarDocs( - "Whether or not to suppress directory stack manipulation output." - ), - "RAISE_SUBPROC_ERROR": VarDocs( - "Whether or not to raise an error if a subprocess (captured or " - "uncaptured) returns a non-zero exit status, which indicates failure. " - "This is most useful in xonsh scripts or modules where failures " - "should cause an end to execution. This is less useful at a terminal. " - "The error that is raised is a ``subprocess.CalledProcessError``." - ), - "RIGHT_PROMPT": VarDocs( - "Template string for right-aligned text " - "at the prompt. This may be parametrized in the same way as " - "the ``$PROMPT`` variable. Currently, this is only available in the " - "prompt-toolkit shell." - ), - "BOTTOM_TOOLBAR": VarDocs( - "Template string for the bottom toolbar. " - "This may be parametrized in the same way as " - "the ``$PROMPT`` variable. Currently, this is only available in the " - "prompt-toolkit shell." - ), - "SHELL_TYPE": VarDocs( - "Which shell is used. Currently two base shell types are supported:\n\n" - " - ``readline`` that is backed by Python's readline module\n" - " - ``prompt_toolkit`` that uses external library of the same name\n" - " - ``random`` selects a random shell from the above on startup\n" - " - ``best`` selects the most feature-rich shell available on the\n" - " user's system\n\n" - "To use the ``prompt_toolkit`` shell you need to have the " - "`prompt_toolkit `_" - " library installed. To specify which shell should be used, do so in " - "the run control file.", - default="``best``", - ), - "SUBSEQUENCE_PATH_COMPLETION": VarDocs( - "Toggles subsequence matching of paths for tab completion. " - "If ``True``, then, e.g., ``~/u/ro`` can match ``~/lou/carcolh``." - ), - "SUGGEST_COMMANDS": VarDocs( - "When a user types an invalid command, xonsh will try to offer " - "suggestions of similar valid commands if this is True." - ), - "SUGGEST_MAX_NUM": VarDocs( - "xonsh will show at most this many suggestions in response to an " - "invalid command. If negative, there is no limit to how many " - "suggestions are shown." - ), - "SUGGEST_THRESHOLD": VarDocs( - "An error threshold. If the Levenshtein distance between the entered " - "command and a valid command is less than this value, the valid " - 'command will be offered as a suggestion. Also used for "fuzzy" ' - "tab completion of paths." - ), - "SUPPRESS_BRANCH_TIMEOUT_MESSAGE": VarDocs( - "Whether or not to suppress branch timeout warning messages." - ), - "TERM": VarDocs( - "TERM is sometimes set by the terminal emulator. This is used (when " - "valid) to determine whether or not to set the title. Users shouldn't " - "need to set this themselves. Note that this variable should be set as " - "early as possible in order to ensure it is effective. Here are a few " - "options:\n\n" - "* Set this from the program that launches xonsh. On POSIX systems, \n" - " this can be performed by using env, e.g. 
\n" - " ``/usr/bin/env TERM=xterm-color xonsh`` or similar.\n" - "* From the xonsh command line, namely ``xonsh -DTERM=xterm-color``.\n" - '* In the config file with ``{"env": {"TERM": "xterm-color"}}``.\n' - "* Lastly, in xonshrc with ``$TERM``\n\n" - "Ideally, your terminal emulator will set this correctly but that does " - "not always happen.", - configurable=False, - ), - "TITLE": VarDocs( - "The title text for the window in which xonsh is running. Formatted " - "in the same manner as ``$PROMPT``, see 'Customizing the Prompt' " - "http://xon.sh/tutorial.html#customizing-the-prompt.", - default="``xonsh.environ.DEFAULT_TITLE``", - ), - "UPDATE_COMPLETIONS_ON_KEYPRESS": VarDocs( - "Completions display is evaluated and presented whenever a key is " - "pressed. This avoids the need to press TAB, except to cycle through " - "the possibilities. This currently only affects the prompt-toolkit shell." - ), - "UPDATE_OS_ENVIRON": VarDocs( - "If True ``os_environ`` will always be updated " - "when the xonsh environment changes. The environment can be reset to " - "the default value by calling ``__xonsh__.env.undo_replace_env()``" - ), - "UPDATE_PROMPT_ON_KEYPRESS": VarDocs( - "Disables caching the prompt between commands, " - "so that it would be reevaluated on each keypress. " - "Disabled by default because of the incurred performance penalty." - ), - "VC_BRANCH_TIMEOUT": VarDocs( - "The timeout (in seconds) for version control " - "branch computations. This is a timeout per subprocess call, so the " - "total time to compute will be larger than this in many cases." - ), - "VC_HG_SHOW_BRANCH": VarDocs( - "Whether or not to show the Mercurial branch in the prompt." - ), - "VI_MODE": VarDocs( - "Flag to enable ``vi_mode`` in the ``prompt_toolkit`` shell." - ), - "VIRTUAL_ENV": VarDocs( - "Path to the currently active Python environment.", configurable=False - ), - "WIN_UNICODE_CONSOLE": VarDocs( - "Enables unicode support in windows terminals. Requires the external " - "library ``win_unicode_console``.", - configurable=ON_WINDOWS, - ), - "XDG_CONFIG_HOME": VarDocs( - "Open desktop standard configuration home dir. This is the same " - "default as used in the standard.", - configurable=False, - default="``~/.config``", - ), - "XDG_DATA_HOME": VarDocs( - "Open desktop standard data home dir. This is the same default as " - "used in the standard.", - default="``~/.local/share``", - ), - "XONSHRC": VarDocs( - "A list of the locations of run control files, if they exist. User " - "defined run control file will supersede values set in system-wide " - "control file if there is a naming collision.", - default=( - "On Linux & Mac OSX: ``['/etc/xonshrc', '~/.config/xonsh/rc.xsh', '~/.xonshrc']``\n" - "\nOn Windows: " - "``['%ALLUSERSPROFILE%\\\\xonsh\\\\xonshrc', '~/.config/xonsh/rc.xsh', '~/.xonshrc']``" - ), - ), - "XONSH_APPEND_NEWLINE": VarDocs( - "Append new line when a partial line is preserved in output." - ), - "XONSH_AUTOPAIR": VarDocs( - "Whether Xonsh will auto-insert matching parentheses, brackets, and " - "quotes. Only available under the prompt-toolkit shell." - ), - "XONSH_CACHE_SCRIPTS": VarDocs( - "Controls whether the code for scripts run from xonsh will be cached" - " (``True``) or re-compiled each time (``False``)." - ), - "XONSH_CACHE_EVERYTHING": VarDocs( - "Controls whether all code (including code entered at the interactive" - " prompt) will be cached." - ), - "XONSH_COLOR_STYLE": VarDocs( - "Sets the color style for xonsh colors. This is a style name, not " - "a color map. 
Run ``xonfig styles`` to see the available styles." - ), - "XONSH_CONFIG_DIR": VarDocs( - "This is the location where xonsh configuration information is stored.", - configurable=False, - default="``$XDG_CONFIG_HOME/xonsh``", - ), - "XONSH_DEBUG": VarDocs( - "Sets the xonsh debugging level. This may be an integer or a boolean. " - "Setting this variable prior to stating xonsh to ``1`` or ``True`` " - "will suppress amalgamated imports. Setting it to ``2`` will get some " - "basic information like input transformation, command replacement. " - "With ``3`` or a higher number will make more debugging information " - "presented, like PLY parsing messages.", - configurable=False, - ), - "XONSH_DATA_DIR": VarDocs( - "This is the location where xonsh data files are stored, such as " - "history.", - default="``$XDG_DATA_HOME/xonsh``", - ), - "XONSH_ENCODING": VarDocs( - "This is the encoding that xonsh should use for subprocess operations.", - default="``sys.getdefaultencoding()``", - ), - "XONSH_ENCODING_ERRORS": VarDocs( - "The flag for how to handle encoding errors should they happen. " - "Any string flag that has been previously registered with Python " - "is allowed. See the 'Python codecs documentation' " - "(https://docs.python.org/3/library/codecs.html#error-handlers) " - "for more information and available options.", - default="``surrogateescape``", - ), - "XONSH_GITSTATUS_*": VarDocs( - "Symbols for gitstatus prompt. Default values are: \n\n" - "* ``XONSH_GITSTATUS_HASH``: ``:``\n" - "* ``XONSH_GITSTATUS_BRANCH``: ``{CYAN}``\n" - "* ``XONSH_GITSTATUS_OPERATION``: ``{CYAN}``\n" - "* ``XONSH_GITSTATUS_STAGED``: ``{RED}●``\n" - "* ``XONSH_GITSTATUS_CONFLICTS``: ``{RED}×``\n" - "* ``XONSH_GITSTATUS_CHANGED``: ``{BLUE}+``\n" - "* ``XONSH_GITSTATUS_UNTRACKED``: ``…``\n" - "* ``XONSH_GITSTATUS_STASHED``: ``⚑``\n" - "* ``XONSH_GITSTATUS_CLEAN``: ``{BOLD_GREEN}✓``\n" - "* ``XONSH_GITSTATUS_AHEAD``: ``↑·``\n" - "* ``XONSH_GITSTATUS_BEHIND``: ``↓·``\n" - ), - "XONSH_HISTORY_BACKEND": VarDocs( - "Set which history backend to use. Options are: 'json', " - "'sqlite', and 'dummy'. The default is 'json'. " - "``XONSH_HISTORY_BACKEND`` also accepts a class type that inherits " - "from ``xonsh.history.base.History``, or its instance." - ), - "XONSH_HISTORY_FILE": VarDocs( - "Location of history file (deprecated).", - configurable=False, - default="``~/.xonsh_history``", - ), - "XONSH_HISTORY_MATCH_ANYWHERE": VarDocs( - "When searching history from a partial string (by pressing up arrow), " - "match command history anywhere in a given line (not just the start)", - default="False", - ), - "XONSH_HISTORY_SIZE": VarDocs( - "Value and units tuple that sets the size of history after garbage " - "collection. 
Canonical units are:\n\n" - "- ``commands`` for the number of past commands executed,\n" - "- ``files`` for the number of history files to keep,\n" - "- ``s`` for the number of seconds in the past that are allowed, and\n" - "- ``b`` for the number of bytes that history may consume.\n\n" - "Common abbreviations, such as '6 months' or '1 GB' are also allowed.", - default="``(8128, 'commands')`` or ``'8128 commands'``", - ), - "XONSH_INTERACTIVE": VarDocs( - "``True`` if xonsh is running interactively, and ``False`` otherwise.", - configurable=False, - ), - "XONSH_LOGIN": VarDocs( - "``True`` if xonsh is running as a login shell, and ``False`` otherwise.", - configurable=False, - ), - "XONSH_PROC_FREQUENCY": VarDocs( - "The process frequency is the time that " - "xonsh process threads sleep for while running command pipelines. " - "The value has units of seconds [s]." - ), - "XONSH_SHOW_TRACEBACK": VarDocs( - "Controls if a traceback is shown if exceptions occur in the shell. " - "Set to ``True`` to always show traceback or ``False`` to always hide. " - "If undefined then the traceback is hidden but a notice is shown on how " - "to enable the full traceback." - ), - "XONSH_SOURCE": VarDocs( - "When running a xonsh script, this variable contains the absolute path " - "to the currently executing script's file.", - configurable=False, - ), - "XONSH_STDERR_PREFIX": VarDocs( - "A format string, using the same keys and colors as ``$PROMPT``, that " - "is prepended whenever stderr is displayed. This may be used in " - "conjunction with ``$XONSH_STDERR_POSTFIX`` to close out the block." - "For example, to have stderr appear on a red background, the " - 'prefix & postfix pair would be "{BACKGROUND_RED}" & "{NO_COLOR}".' - ), - "XONSH_STDERR_POSTFIX": VarDocs( - "A format string, using the same keys and colors as ``$PROMPT``, that " - "is appended whenever stderr is displayed. This may be used in " - "conjunction with ``$XONSH_STDERR_PREFIX`` to start the block." - "For example, to have stderr appear on a red background, the " - 'prefix & postfix pair would be "{BACKGROUND_RED}" & "{NO_COLOR}".' - ), - "XONSH_STORE_STDIN": VarDocs( - "Whether or not to store the stdin that is supplied to the " - "``!()`` and ``![]`` operators." - ), - "XONSH_STORE_STDOUT": VarDocs( - "Whether or not to store the ``stdout`` and ``stderr`` streams in the " - "history files." - ), - "XONSH_TRACEBACK_LOGFILE": VarDocs( - "Specifies a file to store the traceback log to, regardless of whether " - "``XONSH_SHOW_TRACEBACK`` has been set. Its value must be a writable file " - "or None / the empty string if traceback logging is not desired. " - "Logging to a file is not enabled by default." 
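
(Editorial sketch, not part of the patch: enabling the traceback logging described above; the path is hypothetical.)::

    $XONSH_SHOW_TRACEBACK = True
    $XONSH_TRACEBACK_LOGFILE = '~/xonsh-tracebacks.log'
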
-    ),
-    "XONSH_DATETIME_FORMAT": VarDocs(
-        "The format that is used for ``datetime.strptime()`` in various places, "
-        "i.e. the history timestamp option"
-    ),
-}
-
+DEFAULT_VALUES = {
+    'AUTO_PUSHD': False,
+    'AUTO_SUGGEST': True,
+    'BASH_COMPLETIONS': (('/usr/local/etc/bash_completion',
+                          '/opt/local/etc/profile.d/bash_completion.sh',
+                          '/usr/local/etc/bash_completion.d/git-completion.bash')
+                         if ON_MAC else
+                         ('/usr/share/bash-completion/bash_completion',
+                          '/usr/share/bash-completion/completions/git')
+                         if ON_ARCH else
+                         ('/etc/bash_completion',
+                          '/usr/share/bash-completion/completions/git')),
+    'CASE_SENSITIVE_COMPLETIONS': ON_LINUX,
+    'CDPATH': (),
+    'DIRSTACK_SIZE': 20,
+    'FORCE_POSIX_PATHS': False,
+    'INDENT': '    ',
+    'LC_CTYPE': locale.setlocale(locale.LC_CTYPE),
+    'LC_COLLATE': locale.setlocale(locale.LC_COLLATE),
+    'LC_TIME': locale.setlocale(locale.LC_TIME),
+    'LC_MONETARY': locale.setlocale(locale.LC_MONETARY),
+    'LC_NUMERIC': locale.setlocale(locale.LC_NUMERIC),
+    'MOUSE_SUPPORT': False,
+    'MULTILINE_PROMPT': '.',
+    'PATH': (),
+    'PATHEXT': (),
+    'PROMPT': DEFAULT_PROMPT,
+    'PROMPT_TOOLKIT_STYLES': None,
+    'PUSHD_MINUS': False,
+    'PUSHD_SILENT': False,
+    'SHELL_TYPE': 'prompt_toolkit',
+    'SUGGEST_COMMANDS': True,
+    'SUGGEST_MAX_NUM': 5,
+    'SUGGEST_THRESHOLD': 3,
+    'TEEPTY_PIPE_DELAY': 0.01,
+    'TITLE': DEFAULT_TITLE,
+    'XDG_CONFIG_HOME': os.path.expanduser(os.path.join('~', '.config')),
+    'XDG_DATA_HOME': os.path.expanduser(os.path.join('~', '.local', 'share')),
+    'XONSHCONFIG': xonshconfig,
+    'XONSHRC': ((os.path.join(os.environ['ALLUSERSPROFILE'],
+                              'xonsh', 'xonshrc'),
+                 os.path.expanduser('~/.xonshrc')) if ON_WINDOWS
+                else ('/etc/xonshrc', os.path.expanduser('~/.xonshrc'))),
+    'XONSH_CONFIG_DIR': xonsh_config_dir,
+    'XONSH_DATA_DIR': xonsh_data_dir,
+    'XONSH_ENCODING': DEFAULT_ENCODING,
+    'XONSH_ENCODING_ERRORS': 'surrogateescape',
+    'XONSH_HISTORY_FILE': os.path.expanduser('~/.xonsh_history.json'),
+    'XONSH_HISTORY_SIZE': (8128, 'commands'),
+    'XONSH_SHOW_TRACEBACK': False,
+    'XONSH_STORE_STDOUT': False,
+}
+if hasattr(locale, 'LC_MESSAGES'):
+    DEFAULT_VALUES['LC_MESSAGES'] = locale.setlocale(locale.LC_MESSAGES)
+
+class DefaultNotGivenType(object):
+    """Singleton for representing when no default value is given."""
+
+
+DefaultNotGiven = DefaultNotGivenType()

 #
 # actual environment
 #
-
-class Env(cabc.MutableMapping):
+class Env(MutableMapping):
     """A xonsh environment, whose variables have limited typing
     (unlike BASH). Most variables are, by default, strings (like BASH).
     However, the following rules also apply based on variable-name:

     * PATH: any variable whose name ends in PATH is a list of strings.
     * XONSH_HISTORY_SIZE: this variable is an (int | float, str) tuple.
-    * LC_* (locale categories): locale category names get/set the Python
+    * LC_* (locale categories): locale category names get/set the Python
       locale via locale.getlocale() and locale.setlocale() functions.

     An Env instance may be converted to an untyped version suitable for
     use in a subprocess.
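
(Editorial sketch, not part of the patch: the typing rules listed above in action; exact conversions are governed by the ``Ensurer`` entries registered for each name.)::

    env = Env()
    env['XONSH_HISTORY_SIZE'] = '8128 commands'  # coerced to a (value, units) tuple
    detyped = env.detype()                       # all-string copy for subprocesses
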
""" - _arg_regex = None + _arg_regex = re.compile(r'ARG(\d+)') def __init__(self, *args, **kwargs): - """If no initial environment is given, os_environ is used.""" + """If no initial environment is given, os.environ is used.""" self._d = {} - # sentinel value for non existing envvars - self._no_value = object() - self._orig_env = None - self._ensurers = {k: Ensurer(*v) for k, v in DEFAULT_ENSURERS.items()} - self._defaults = DEFAULT_VALUES - self._docs = DEFAULT_DOCS + self.ensurers = {k: Ensurer(*v) for k, v in DEFAULT_ENSURERS.items()} + self.defaults = DEFAULT_VALUES if len(args) == 0 and len(kwargs) == 0: - args = (os_environ,) + args = (os.environ, ) for key, val in dict(*args, **kwargs).items(): self[key] = val - if ON_WINDOWS: - path_key = next((k for k in self._d if k.upper() == "PATH"), None) - if path_key: - self["PATH"] = self._d.pop(path_key) - if "PATH" not in self._d: - # this is here so the PATH is accessible to subprocs and so that - # it can be modified in-place in the xonshrc file - self._d["PATH"] = list(PATH_DEFAULT) self._detyped = None + self._orig_env = None def detype(self): if self._detyped is not None: return self._detyped ctx = {} for key, val in self._d.items(): - if not isinstance(key, str): + if callable(val) or isinstance(val, MutableMapping): + continue + if not isinstance(key, string_types): key = str(key) ensurer = self.get_ensurer(key) - if ensurer.detype is None: - # cannot be detyped - continue - deval = ensurer.detype(val) - if deval is None: - # cannot be detyped - continue - ctx[key] = deval + val = ensurer.detype(val) + ctx[key] = val self._detyped = ctx return ctx def replace_env(self): - """Replaces the contents of os_environ with a detyped version - of the xonsh environment. + """Replaces the contents of os.environ with a detyped version + of the xonsh environement. """ if self._orig_env is None: - self._orig_env = dict(os_environ) - os_environ.clear() - os_environ.update(self.detype()) + self._orig_env = dict(os.environ) + os.environ.clear() + os.environ.update(self.detype()) def undo_replace_env(self): - """Replaces the contents of os_environ with a detyped version - of the xonsh environment. + """Replaces the contents of os.environ with a detyped version + of the xonsh environement. 
""" if self._orig_env is not None: - os_environ.clear() - os_environ.update(self._orig_env) + os.environ.clear() + os.environ.update(self._orig_env) self._orig_env = None - def _get_default_ensurer(self, default=None): - if default is not None: - return default - else: - default = Ensurer(always_true, None, ensure_string) - return default - - def get_ensurer(self, key, default=None): + def get_ensurer(self, key, + default=Ensurer(always_true, None, ensure_string)): """Gets an ensurer for the given key.""" - if key in self._ensurers: - return self._ensurers[key] - for k, ensurer in self._ensurers.items(): - if isinstance(k, str): + if key in self.ensurers: + return self.ensurers[key] + for k, ensurer in self.ensurers.items(): + if isinstance(k, string_types): continue - if k.match(key) is not None: + m = k.match(key) + if m is not None: + ens = ensurer break else: - ensurer = self._get_default_ensurer(default=default) - self._ensurers[key] = ensurer - return ensurer - - def set_ensurer(self, key, value): - """Sets an ensurer.""" - self._detyped = None - self._ensurers[key] = value - - def get_docs(self, key, default=VarDocs("")): - """Gets the documentation for the environment variable.""" - vd = self._docs.get(key, None) - if vd is None: - return default - if vd.default is DefaultNotGiven: - dval = pprint.pformat(self._defaults.get(key, "")) - vd = vd._replace(default=dval) - self._docs[key] = vd - return vd - - def help(self, key): - """Get information about a specific environment variable.""" - vardocs = self.get_docs(key) - width = min(79, os.get_terminal_size()[0]) - docstr = "\n".join(textwrap.wrap(vardocs.docstr, width=width)) - template = HELP_TEMPLATE.format( - envvar=key, - docstr=docstr, - default=vardocs.default, - configurable=vardocs.configurable, - ) - print_color(template) - - def is_manually_set(self, varname): - """ - Checks if an environment variable has been manually set. - """ - return varname in self._d - - @contextlib.contextmanager - def swap(self, other=None, **kwargs): - """Provides a context manager for temporarily swapping out certain - environment variables with other values. On exit from the context - manager, the original values are restored. - """ - old = {} - # single positional argument should be a dict-like object - if other is not None: - for k, v in other.items(): - old[k] = self.get(k, NotImplemented) - self[k] = v - # kwargs could also have been sent in - for k, v in kwargs.items(): - old[k] = self.get(k, NotImplemented) - self[k] = v - - exception = None - try: - yield self - except Exception as e: - exception = e - finally: - # restore the values - for k, v in old.items(): - if v is NotImplemented: - del self[k] - else: - self[k] = v - if exception is not None: - raise exception from None + ens = default + self.ensurers[key] = ens + return ens # # Mutable mapping interface # def __getitem__(self, key): - # remove this block on next release - if key is Ellipsis: - return self - elif key in self._d: - val = self._d[key] - elif key in self._defaults: - val = self._defaults[key] - if is_callable_default(val): - val = val(self) - else: - e = "Unknown environment variable: ${}" - raise KeyError(e.format(key)) - if isinstance( - val, (cabc.MutableSet, cabc.MutableSequence, cabc.MutableMapping) - ): + m = self._arg_regex.match(key) + if (m is not None) and (key not in self._d) and ('ARGS' in self._d): + args = self._d['ARGS'] + ix = int(m.group(1)) + if ix >= len(args): + e = "Not enough arguments given to access ARG{0}." 
+ raise IndexError(e.format(ix)) + return self._d['ARGS'][ix] + val = self._d[key] + if isinstance(val, (MutableSet, MutableSequence, MutableMapping)): self._detyped = None - return val + return self._d[key] def __setitem__(self, key, val): ensurer = self.get_ensurer(key) if not ensurer.validate(val): val = ensurer.convert(val) - # existing envvars can have any value including None - old_value = self._d[key] if key in self._d else self._no_value self._d[key] = val self._detyped = None - if self.get("UPDATE_OS_ENVIRON"): - if self._orig_env is None: - self.replace_env() - elif ensurer.detype is None: - pass - else: - deval = ensurer.detype(val) - if deval is not None: - os_environ[key] = deval - if old_value is self._no_value: - events.on_envvar_new.fire(name=key, value=val) - elif old_value != val: - events.on_envvar_change.fire(name=key, oldvalue=old_value, newvalue=val) def __delitem__(self, key): del self._d[key] self._detyped = None - if self.get("UPDATE_OS_ENVIRON") and key in os_environ: - del os_environ[key] - def get(self, key, default=None): + def get(self, key, default=DefaultNotGiven): """The environment will look up default values from its own defaults if a default is not given here. """ - try: - return self[key] - except KeyError: - return default + if key in self: + val = self[key] + elif default is DefaultNotGiven: + val = self.defaults.get(key, None) + if is_callable_default(val): + val = val(self) + else: + val = default + return val def __iter__(self): - yield from (set(self._d) | set(self._defaults)) - - def __contains__(self, item): - return item in self._d or item in self._defaults + yield from self._d def __len__(self): return len(self._d) @@ -1543,136 +313,370 @@ def __str__(self): return str(self._d) def __repr__(self): - return "{0}.{1}(...)".format( - self.__class__.__module__, self.__class__.__name__, self._d - ) + return '{0}.{1}({2})'.format(self.__class__.__module__, + self.__class__.__name__, self._d) def _repr_pretty_(self, p, cycle): - name = "{0}.{1}".format(self.__class__.__module__, self.__class__.__name__) - with p.group(0, name + "(", ")"): + name = '{0}.{1}'.format(self.__class__.__module__, + self.__class__.__name__) + with p.group(0, name + '(', ')'): if cycle: - p.text("...") + p.text('...') elif len(self): p.break_() p.pretty(dict(self)) -def _yield_executables(directory, name): +def locate_binary(name, cwd): + # StackOverflow for `where` tip: http://stackoverflow.com/a/304447/90297 + locator = 'where' if ON_WINDOWS else 'which' + try: + binary_location = subprocess.check_output([locator, name], + cwd=cwd, + stderr=subprocess.PIPE, + universal_newlines=True) + if not binary_location: + return + except (subprocess.CalledProcessError, FileNotFoundError): + return + + return binary_location + + +def ensure_git(func): + @wraps(func) + def wrapper(*args, **kwargs): + # Get cwd or bail + kwargs['cwd'] = kwargs.get('cwd', _get_cwd()) + if kwargs['cwd'] is None: + return + + # step out completely if git is not installed + if locate_binary('git', kwargs['cwd']) is None: + return + + return func(*args, **kwargs) + return wrapper + + +def ensure_hg(func): + @wraps(func) + def wrapper(*args, **kwargs): + kwargs['cwd'] = kwargs.get('cwd', _get_cwd()) + if kwargs['cwd'] is None: + return + + # walk up the directory tree to see if we are inside an hg repo + path = kwargs['cwd'].split(os.path.sep) + while len(path) > 0: + if os.path.exists(os.path.sep.join(path + ['.hg'])): + break + del path[-1] + + # bail if we aren't inside a repository + if path == []: + 
return + + kwargs['root'] = os.path.sep.join(path) + + # step out completely if hg is not installed + if locate_binary('hg', kwargs['cwd']) is None: + return + + return func(*args, **kwargs) + return wrapper + + +@ensure_git +def get_git_branch(cwd=None): + branch = None + + if not ON_WINDOWS: + prompt_scripts = ['/usr/lib/git-core/git-sh-prompt', + '/usr/local/etc/bash_completion.d/git-prompt.sh'] + + for script in prompt_scripts: + # note that this is about 10x faster than bash -i "__git_ps1" + _input = ('source {}; __git_ps1 "${{1:-%s}}"'.format(script)) + try: + branch = subprocess.check_output(['bash', ], + cwd=cwd, + input=_input, + stderr=subprocess.PIPE, + universal_newlines=True) + if len(branch) == 0: + branch = None + except (subprocess.CalledProcessError, FileNotFoundError): + continue + + # fall back to using the git binary if the above failed + if branch is None: + try: + cmd = ['git', 'rev-parse', '--abbrev-ref', 'HEAD'] + s = subprocess.check_output(cmd, + stderr=subprocess.PIPE, + cwd=cwd, + universal_newlines=True) + s = s.strip() + if len(s) > 0: + branch = s + except (subprocess.CalledProcessError, FileNotFoundError): + pass + + return branch + + +def call_hg_command(command, cwd): + # Override user configurations settings and aliases + hg_env = os.environ.copy() + hg_env['HGRCPATH'] = "" + + s = None + try: + s = subprocess.check_output(['hg'] + command, + stderr=subprocess.PIPE, + cwd=cwd, + universal_newlines=True, + env=hg_env) + except (subprocess.CalledProcessError, FileNotFoundError): + pass + + return s + + +@ensure_hg +def get_hg_branch(cwd=None, root=None): + branch = None + active_bookmark = None + + if root is not None: + branch_path = os.path.sep.join([root, '.hg', 'branch']) + bookmark_path = os.path.sep.join([root, '.hg', 'bookmarks.current']) + + if os.path.exists(branch_path): + with open(branch_path, 'r') as branch_file: + branch = branch_file.read() + else: + branch = call_hg_command(['branch'], cwd) + + if os.path.exists(bookmark_path): + with open(bookmark_path, 'r') as bookmark_file: + active_bookmark = bookmark_file.read() + + if active_bookmark is not None: + return "{0}, {1}".format( + *(b.strip(os.linesep) for b in (branch, active_bookmark))) + + return branch.strip(os.linesep) if branch else None + + +def current_branch(pad=True): + """Gets the branch for a current working directory. Returns None + if the cwd is not a repository. This currently only works for git and hg + and should be extended in the future. + """ + branch = get_git_branch() or get_hg_branch() + + if pad and branch is not None: + branch = ' ' + branch + + return branch or '' + + +@ensure_git +def git_dirty_working_directory(cwd=None): + try: + cmd = ['git', 'status', '--porcelain'] + s = subprocess.check_output(cmd, + stderr=subprocess.PIPE, + cwd=cwd, + universal_newlines=True) + return bool(s) + except (subprocess.CalledProcessError, FileNotFoundError): + return False + + +@ensure_hg +def hg_dirty_working_directory(cwd=None, root=None): + id = call_hg_command(['identify', '--id'], cwd) + if id is None: + return False + return id.strip(os.linesep).endswith('+') + + +def dirty_working_directory(cwd=None): + """Returns a boolean as to whether there are uncommitted files in version + control repository we are inside. Currently supports git and hg. 
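
(Editorial sketch, not part of the patch: a usage illustration for the helpers above and ``branch_color`` just below, assuming the cwd is inside a git or hg repository.)::

    label = current_branch(pad=True)   # e.g. ' master'; '' outside any repo
    if dirty_working_directory():
        color = branch_color()         # bold red when dirty, bold green otherwise
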
+ """ + return git_dirty_working_directory() or hg_dirty_working_directory() + + +def branch_color(): + """Return red if the current branch is dirty, otherwise green""" + return (TERM_COLORS['BOLD_RED'] if dirty_working_directory() else + TERM_COLORS['BOLD_GREEN']) + + +def _replace_home(x): if ON_WINDOWS: - base_name, ext = os.path.splitext(name.lower()) - for fname in executables_in(directory): - fbase, fext = os.path.splitext(fname.lower()) - if base_name == fbase and (len(ext) == 0 or ext == fext): - yield os.path.join(directory, fname) + home = (builtins.__xonsh_env__['HOMEDRIVE'] + + builtins.__xonsh_env__['HOMEPATH'][0]) + cwd = x.replace(home, '~') + + if builtins.__xonsh_env__.get('FORCE_POSIX_PATHS'): + cwd = cwd.replace(os.sep, os.altsep) + + return cwd + else: + return x.replace(builtins.__xonsh_env__['HOME'], '~') + +_replace_home_cwd = lambda: _replace_home(builtins.__xonsh_env__['PWD']) + + +if ON_WINDOWS: + USER = 'USERNAME' +else: + USER = 'USER' + + +FORMATTER_DICT = dict( + user=os.environ.get(USER, ''), + hostname=socket.gethostname().split('.', 1)[0], + cwd=_replace_home_cwd, + cwd_dir=lambda: os.path.dirname(_replace_home_cwd()), + cwd_base=lambda: os.path.basename(_replace_home_cwd()), + curr_branch=current_branch, + branch_color=branch_color, + **TERM_COLORS) +DEFAULT_VALUES['FORMATTER_DICT'] = dict(FORMATTER_DICT) + +_FORMATTER = string.Formatter() + +def format_prompt(template=DEFAULT_PROMPT, formatter_dict=None): + """Formats a xonsh prompt template string.""" + template = template() if callable(template) else template + if formatter_dict is None: + fmtter = builtins.__xonsh_env__.get('FORMATTER_DICT', FORMATTER_DICT) + else: + fmtter = formatter_dict + included_names = set(i[1] for i in _FORMATTER.parse(template)) + fmt = {} + for name in included_names: + if name is None: + continue + if name.startswith('$'): + v = builtins.__xonsh_env__[name[1:]] + else: + v = fmtter[name] + val = v() if callable(v) else v + val = '' if val is None else val + fmt[name] = val + return template.format(**fmt) + + +RE_HIDDEN = re.compile('\001.*?\002') + +def multiline_prompt(): + """Returns the filler text for the prompt in multiline scenarios.""" + curr = builtins.__xonsh_env__.get('PROMPT') + curr = format_prompt(curr) + line = curr.rsplit('\n', 1)[1] if '\n' in curr else curr + line = RE_HIDDEN.sub('', line) # gets rid of colors + # most prompts end in whitespace, head is the part before that. + head = line.rstrip() + headlen = len(head) + # tail is the trailing whitespace + tail = line if headlen == 0 else line.rsplit(head[-1], 1)[1] + # now to constuct the actual string + dots = builtins.__xonsh_env__.get('MULTILINE_PROMPT') + dots = dots() if callable(dots) else dots + if dots is None or len(dots) == 0: + return '' + return (dots * (headlen // len(dots))) + dots[:headlen % len(dots)] + tail + + +BASE_ENV = { + 'BASH_COMPLETIONS': list(DEFAULT_VALUES['BASH_COMPLETIONS']), + 'FORMATTER_DICT': dict(DEFAULT_VALUES['FORMATTER_DICT']), + 'XONSH_VERSION': XONSH_VERSION, +} + +def load_static_config(ctx): + """Loads a static configuration file from a given context, rather than the + current environment. 
+ """ + env = {} + env['XDG_CONFIG_HOME'] = ctx.get('XDG_CONFIG_HOME', + DEFAULT_VALUES['XDG_CONFIG_HOME']) + env['XONSH_CONFIG_DIR'] = ctx['XONSH_CONFIG_DIR'] if 'XONSH_CONFIG_DIR' in ctx \ + else xonsh_config_dir(env) + env['XONSHCONFIG'] = ctx['XONSHCONFIG'] if 'XONSHCONFIG' in ctx \ + else xonshconfig(env) + config = env['XONSHCONFIG'] + if os.path.isfile(config): + with open(config, 'r') as f: + conf = json.load(f) else: - for x in executables_in(directory): - if x == name: - yield os.path.join(directory, name) - return - - -def locate_binary(name): - """Locates an executable on the file system.""" - return builtins.__xonsh__.commands_cache.locate_binary(name) - - -BASE_ENV = LazyObject( - lambda: { - "BASH_COMPLETIONS": list(DEFAULT_VALUES["BASH_COMPLETIONS"]), - "PROMPT_FIELDS": dict(DEFAULT_VALUES["PROMPT_FIELDS"]), - "XONSH_VERSION": XONSH_VERSION, - }, - globals(), - "BASE_ENV", -) - - -def xonshrc_context(rcfiles=None, execer=None, ctx=None, env=None, login=True): - """Attempts to read in all xonshrc files and return the context.""" - loaded = env["LOADED_RC_FILES"] = [] - ctx = {} if ctx is None else ctx - if rcfiles is None: - return env - env["XONSHRC"] = tuple(rcfiles) + conf = {} + return conf + + +def xonshrc_context(rcfiles=None, execer=None): + """Attempts to read in xonshrc file, and return the contents.""" + if (rcfiles is None or execer is None + or sum([os.path.isfile(rcfile) for rcfile in rcfiles]) == 0): + return {} + env = {} for rcfile in rcfiles: if not os.path.isfile(rcfile): - loaded.append(False) continue - _, ext = os.path.splitext(rcfile) - status = xonsh_script_run_control(rcfile, ctx, env, execer=execer, login=login) - loaded.append(status) - return ctx + with open(rcfile, 'r') as f: + rc = f.read() + if not rc.endswith('\n'): + rc += '\n' + fname = execer.filename + try: + execer.filename = rcfile + execer.exec(rc, glbs=env) + except SyntaxError as err: + msg = 'syntax error in xonsh run control file {0!r}: {1!s}' + warn(msg.format(rcfile, err), RuntimeWarning) + finally: + execer.filename = fname + return env -def windows_foreign_env_fixes(ctx): +def windows_env_fixes(ctx): """Environment fixes for Windows. Operates in-place.""" + # Windows default prompt doesn't work. + ctx['PROMPT'] = DEFAULT_PROMPT # remove these bash variables which only cause problems. - for ev in ["HOME", "OLDPWD"]: + for ev in ['HOME', 'OLDPWD']: if ev in ctx: del ctx[ev] # Override path-related bash variables; on Windows bash uses # /c/Windows/System32 syntax instead of C:\\Windows\\System32 # which messes up these environment variables for xonsh. 
- for ev in ["PATH", "TEMP", "TMP"]: - if ev in os_environ: - ctx[ev] = os_environ[ev] + for ev in ['PATH', 'TEMP', 'TMP']: + if ev in os.environ: + ctx[ev] = os.environ[ev] elif ev in ctx: del ctx[ev] - ctx["PWD"] = _get_cwd() or "" - - -def foreign_env_fixes(ctx): - """Environment fixes for all operating systems""" - if "PROMPT" in ctx: - del ctx["PROMPT"] - - -def xonsh_script_run_control(filename, ctx, env, execer=None, login=True): - """Loads a xonsh file and applies it as a run control.""" - if execer is None: - return False - updates = {"__file__": filename, "__name__": os.path.abspath(filename)} - try: - with swap_values(ctx, updates): - run_script_with_cache(filename, execer, ctx) - loaded = True - except SyntaxError as err: - msg = "syntax error in xonsh run control file {0!r}: {1!s}" - print_exception(msg.format(filename, err)) - loaded = False - except Exception as err: - msg = "error running xonsh run control file {0!r}: {1!s}" - print_exception(msg.format(filename, err)) - loaded = False - return loaded + ctx['PWD'] = _get_cwd() def default_env(env=None): """Constructs a default xonsh environment.""" # in order of increasing precedence ctx = dict(BASE_ENV) - ctx.update(os_environ) - ctx["PWD"] = _get_cwd() or "" - # These can cause problems for programs (#2543) - ctx.pop("LINES", None) - ctx.pop("COLUMNS", None) - # other shells' PROMPT definitions generally don't work in XONSH: - try: - del ctx["PROMPT"] - except KeyError: - pass + ctx.update(os.environ) + conf = load_static_config(ctx) + ctx.update(conf.get('env', ())) + ctx.update(load_foreign_envs(shells=conf.get('foreign_shells', DEFAULT_SHELLS), + issue_warning=False)) + if ON_WINDOWS: + windows_env_fixes(ctx) # finalize env if env is not None: ctx.update(env) return ctx - - -def make_args_env(args=None): - """Makes a dictionary containing the $ARGS and $ARG environment - variables. If the supplied ARGS is None, then sys.argv is used. - """ - if args is None: - args = sys.argv - env = {"ARG" + str(i): arg for i, arg in enumerate(args)} - env["ARGS"] = list(args) # make a copy so we don't interfere with original variable - return env diff --git a/xonsh/events.py b/xonsh/events.py deleted file mode 100644 index 195194f..0000000 --- a/xonsh/events.py +++ /dev/null @@ -1,347 +0,0 @@ -""" -Events for xonsh. - -In all likelihood, you want builtins.events - -The best way to "declare" an event is something like:: - - events.doc('on_spam', "Comes with eggs") -""" -import abc -import builtins -import collections.abc -import inspect - -from xonsh.tools import print_exception - - -def has_kwargs(func): - return any( - p.kind == p.VAR_KEYWORD for p in inspect.signature(func).parameters.values() - ) - - -def debug_level(): - if hasattr(builtins, "__xonsh__") and hasattr(builtins.__xonsh__, "env"): - return builtins.__xonsh__.env.get("XONSH_DEBUG") - # FIXME: Under py.test, return 1(?) - else: - return 0 # Optimize for speed, not guaranteed correctness - - -class AbstractEvent(collections.abc.MutableSet, abc.ABC): - """ - A given event that handlers can register against. - - Acts as a ``MutableSet`` for registered handlers. - - Note that ordering is never guaranteed. - """ - - @property - def species(self): - """ - The species (basically, class) of the event - """ - return type(self).__bases__[ - 0 - ] # events.on_chdir -> -> - - def __call__(self, handler): - """ - Registers a handler. It's suggested to use this as a decorator. - - A decorator method is added to the handler, validator(). 
If a validator - function is added, it can filter if the handler will be considered. The - validator takes the same arguments as the handler. If it returns False, - the handler will not called or considered, as if it was not registered - at all. - - Parameters - ---------- - handler : callable - The handler to register - - Returns - ------- - rtn : callable - The handler - """ - # Using Python's "private" munging to minimize hypothetical collisions - handler.__validator = None - if debug_level(): - if not has_kwargs(handler): - raise ValueError("Event handlers need a **kwargs for future proofing") - self.add(handler) - - def validator(vfunc): - """ - Adds a validator function to a handler to limit when it is considered. - """ - if debug_level(): - if not has_kwargs(handler): - raise ValueError( - "Event validators need a **kwargs for future proofing" - ) - handler.__validator = vfunc - - handler.validator = validator - - return handler - - def _filterhandlers(self, handlers, **kwargs): - """ - Helper method for implementing classes. Generates the handlers that pass validation. - """ - for handler in handlers: - if handler.__validator is not None and not handler.__validator(**kwargs): - continue - yield handler - - @abc.abstractmethod - def fire(self, **kwargs): - """ - Fires an event, calling registered handlers with the given arguments. - - Parameters - ---------- - **kwargs : - Keyword arguments to pass to each handler - """ - - -class Event(AbstractEvent): - """ - An event species for notify and scatter-gather events. - """ - - # Wish I could just pull from set... - def __init__(self): - self._handlers = set() - self._firing = False - self._delayed_adds = None - self._delayed_discards = None - - def __len__(self): - return len(self._handlers) - - def __contains__(self, item): - return item in self._handlers - - def __iter__(self): - yield from self._handlers - - def add(self, item): - """ - Add an element to a set. - - This has no effect if the element is already present. - """ - if self._firing: - if self._delayed_adds is None: - self._delayed_adds = set() - self._delayed_adds.add(item) - else: - self._handlers.add(item) - - def discard(self, item): - """ - Remove an element from a set if it is a member. - - If the element is not a member, do nothing. - """ - if self._firing: - if self._delayed_discards is None: - self._delayed_discards = set() - self._delayed_discards.add(item) - else: - self._handlers.discard(item) - - def fire(self, **kwargs): - """ - Fires an event, calling registered handlers with the given arguments. A non-unique iterable - of the results is returned. - - Each handler is called immediately. Exceptions are turned in to warnings. - - Parameters - ---------- - **kwargs : - Keyword arguments to pass to each handler - - Returns - ------- - vals : iterable - Return values of each handler. If multiple handlers return the same value, it will - appear multiple times. 
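
(Editorial sketch, not part of the patch: registering a handler and a validator per the API described above; ``on_chdir`` and its keyword arguments are illustrative.)::

    @events.on_chdir
    def announce(olddir=None, newdir=None, **kwargs):  # **kwargs future-proofs
        print('now in', newdir)

    def really_moved(olddir=None, newdir=None, **kwargs):
        return olddir != newdir

    announce.validator(really_moved)   # handler only runs when this returns True

    events.on_chdir.fire(olddir='/tmp', newdir='/home')
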
- """ - vals = [] - self._firing = True - for handler in self._filterhandlers(self._handlers, **kwargs): - try: - rv = handler(**kwargs) - except Exception: - print_exception("Exception raised in event handler; ignored.") - else: - vals.append(rv) - # clean up - self._firing = False - if self._delayed_adds is not None: - self._handlers.update(self._delayed_adds) - self._delayed_adds = None - if self._delayed_discards is not None: - self._handlers.difference_update(self._delayed_discards) - self._delayed_discards = None - return vals - - -class LoadEvent(AbstractEvent): - """ - An event species where each handler is called exactly once, shortly after either the event is - fired or the handler is registered (whichever is later). Additional firings are ignored. - - Note: Does not support scatter/gather, due to never knowing when we have all the handlers. - - Note: Maintains a strong reference to pargs/kwargs in case of the addition of future handlers. - - Note: This is currently NOT thread safe. - """ - - def __init__(self): - self._fired = set() - self._unfired = set() - self._hasfired = False - - def __len__(self): - return len(self._fired) + len(self._unfired) - - def __contains__(self, item): - return item in self._fired or item in self._unfired - - def __iter__(self): - yield from self._fired - yield from self._unfired - - def add(self, item): - """ - Add an element to a set. - - This has no effect if the element is already present. - """ - if self._hasfired: - self._call(item) - self._fired.add(item) - else: - self._unfired.add(item) - - def discard(self, item): - """ - Remove an element from a set if it is a member. - - If the element is not a member, do nothing. - """ - self._fired.discard(item) - self._unfired.discard(item) - - def _call(self, handler): - try: - handler(**self._kwargs) - except Exception: - print_exception("Exception raised in event handler; ignored.") - - def fire(self, **kwargs): - if self._hasfired: - return - self._kwargs = kwargs - while self._unfired: - handler = self._unfired.pop() - self._call(handler) - self._hasfired = True - return () # Entirely for API compatibility - - -class EventManager: - """ - Container for all events in a system. - - Meant to be a singleton, but doesn't enforce that itself. - - Each event is just an attribute. They're created dynamically on first use. - """ - - def doc(self, name, docstring): - """ - Applies a docstring to an event. - - Parameters - ---------- - name : str - The name of the event, eg "on_precommand" - docstring : str - The docstring to apply to the event - """ - type(getattr(self, name)).__doc__ = docstring - - @staticmethod - def _mkevent(name, species=Event, doc=None): - # NOTE: Also used in `xonsh_events` test fixture - # (A little bit of magic to enable docstrings to work right) - return type( - name, - (species,), - { - "__doc__": doc, - "__module__": "xonsh.events", - "__qualname__": "events." + name, - }, - )() - - def transmogrify(self, name, species): - """ - Converts an event from one species to another, preserving handlers and docstring. - - Please note: Some species maintain specialized state. This is lost on transmogrification. - - Parameters - ---------- - name : str - The name of the event, eg "on_precommand" - species : subclass of AbstractEvent - The type to turn the event in to. 
- """ - if isinstance(species, str): - species = globals()[species] - - if not issubclass(species, AbstractEvent): - raise ValueError("Invalid event class; must be a subclass of AbstractEvent") - - oldevent = getattr(self, name) - newevent = self._mkevent(name, species, type(oldevent).__doc__) - setattr(self, name, newevent) - - for handler in oldevent: - newevent.add(handler) - - def exists(self, name): - """Checks if an event with a given name exist. If it does not exist, it - will not be created. That is what makes this different than - ``hasattr(events, name)``, which will create the event. - """ - return name in self.__dict__ - - def __getattr__(self, name): - """Get an event, if it doesn't already exist.""" - if name.startswith("_"): - raise AttributeError - # This is only called if the attribute doesn't exist, so create the Event... - e = self._mkevent(name) - # ... and save it. - setattr(self, name, e) - # Now it exists, and we won't be called again. - return e - - -# Not lazy because: -# 1. Initialization of EventManager can't be much cheaper -# 2. It's expected to be used at load time, negating any benefits of using lazy object -events = EventManager() diff --git a/xonsh/execer.py b/xonsh/execer.py index 1815a5b..4ec9814 100644 --- a/xonsh/execer.py +++ b/xonsh/execer.py @@ -1,37 +1,25 @@ -# -*- coding: utf-8 -*- -"""Implements the xonsh executer.""" -import sys +"""Implements the xonsh executer""" +import re +import os import types import inspect import builtins -import collections.abc as cabc +from collections import Iterable, Sequence, Mapping -from xonsh.ast import CtxAwareTransformer +from xonsh import ast from xonsh.parser import Parser -from xonsh.tools import ( - subproc_toks, - find_next_break, - get_logical_line, - replace_logical_line, - balanced_parens, - starting_whitespace, -) -from xonsh.built_ins import load_builtins, unload_builtins, load_proxies, unload_proxies +from xonsh.tools import subproc_toks +from xonsh.built_ins import load_builtins, unload_builtins class Execer(object): """Executes xonsh code in a context.""" - def __init__( - self, - filename="", - debug_level=0, - parser_args=None, - unload=True, - xonsh_ctx=None, - scriptcache=True, - cacheall=False, - ): + def __init__(self, + filename='', + debug_level=0, + parser_args=None, + unload=True): """Parameters ---------- filename : str, optional @@ -42,47 +30,32 @@ def __init__( Arguments to pass down to the parser. unload : bool, optional Whether or not to unload xonsh builtins upon deletion. - xonsh_ctx : dict or None, optional - Xonsh xontext to load as builtins.__xonsh__.ctx - scriptcache : bool, optional - Whether or not to use a precompiled bytecode cache when execing - code, default: True. - cacheall : bool, optional - Whether or not to cache all xonsh code, and not just files. If this - is set to true, it will cache command line input too, default: False. 
""" parser_args = parser_args or {} self.parser = Parser(**parser_args) self.filename = filename self.debug_level = debug_level self.unload = unload - self.scriptcache = scriptcache - self.cacheall = cacheall - self.ctxtransformer = CtxAwareTransformer(self.parser) - load_builtins(execer=self, ctx=xonsh_ctx) - load_proxies() + self.ctxtransformer = ast.CtxAwareTransformer(self.parser) + load_builtins(execer=self) def __del__(self): if self.unload: - unload_proxies() unload_builtins() - def parse(self, input, ctx, mode="exec", filename=None, transform=True): + def parse(self, input, ctx, mode='exec'): """Parses xonsh code in a context-aware fashion. For context-free - parsing, please use the Parser class directly or pass in - transform=False. + parsing, please use the Parser class directly. """ - if filename is None: - filename = self.filename - if not transform: - return self.parser.parse( - input, filename=filename, mode=mode, debug_level=(self.debug_level > 2) - ) + if ctx is None: + ctx = set() + elif isinstance(ctx, Mapping): + ctx = set(ctx.keys()) # Parsing actually happens in a couple of phases. The first is a # shortcut for a context-free parser. Normally, all subprocess # lines should be wrapped in $(), to indicate that they are a - # subproc. But that would be super annoying. Unfortunately, Python + # subproc. But that would be super annoying. Unfortnately, Python # mode - after indentation - is whitespace agnostic while, using # the Python token, subproc mode is whitespace aware. That is to say, # in Python mode "ls -l", "ls-l", and "ls - l" all parse to the @@ -93,7 +66,7 @@ def parse(self, input, ctx, mode="exec", filename=None, transform=True): # tokens for all of the Python rules. The lazy way implemented here # is to parse a line a second time with a $() wrapper if it fails # the first time. This is a context-free phase. - tree, input = self._parse_ctx_free(input, mode=mode, filename=filename) + tree = self._parse_ctx_free(input, mode=mode) if tree is None: return None @@ -103,25 +76,11 @@ def parse(self, input, ctx, mode="exec", filename=None, transform=True): # (ls) is part of the execution context. If it isn't, then we will # assume that this line is supposed to be a subprocess line, assuming # it also is valid as a subprocess line. - if ctx is None: - ctx = set() - elif isinstance(ctx, cabc.Mapping): - ctx = set(ctx.keys()) - tree = self.ctxtransformer.ctxvisit( - tree, input, ctx, mode=mode, debug_level=self.debug_level - ) + tree = self.ctxtransformer.ctxvisit(tree, input, ctx, mode=mode) return tree - def compile( - self, - input, - mode="exec", - glbs=None, - locs=None, - stacklevel=2, - filename=None, - transform=True, - ): + def compile(self, input, mode='exec', glbs=None, locs=None, stacklevel=2, + filename=None): """Compiles xonsh code into a Python code object, which may then be execed or evaled. 
""" @@ -132,93 +91,63 @@ def compile( glbs = frame.f_globals if glbs is None else glbs locs = frame.f_locals if locs is None else locs ctx = set(dir(builtins)) | set(glbs.keys()) | set(locs.keys()) - tree = self.parse(input, ctx, mode=mode, filename=filename, transform=transform) + tree = self.parse(input, ctx, mode=mode) if tree is None: return None # handles comment only input code = compile(tree, filename, mode) return code - def eval( - self, input, glbs=None, locs=None, stacklevel=2, filename=None, transform=True - ): + def eval(self, input, glbs=None, locs=None, stacklevel=2): """Evaluates (and returns) xonsh code.""" if isinstance(input, types.CodeType): code = input else: - if filename is None: - filename = self.filename - code = self.compile( - input=input, - glbs=glbs, - locs=locs, - mode="eval", - stacklevel=stacklevel, - filename=filename, - transform=transform, - ) + code = self.compile(input=input, + glbs=glbs, + locs=locs, + mode='eval', + stacklevel=stacklevel) if code is None: return None # handles comment only input return eval(code, glbs, locs) - def exec( - self, - input, - mode="exec", - glbs=None, - locs=None, - stacklevel=2, - filename=None, - transform=True, - ): + def exec(self, input, mode='exec', glbs=None, locs=None, stacklevel=2): """Execute xonsh code.""" if isinstance(input, types.CodeType): code = input else: - if filename is None: - filename = self.filename - code = self.compile( - input=input, - glbs=glbs, - locs=locs, - mode=mode, - stacklevel=stacklevel, - filename=filename, - transform=transform, - ) + code = self.compile(input=input, + glbs=glbs, + locs=locs, + mode=mode, + stacklevel=stacklevel) if code is None: return None # handles comment only input return exec(code, glbs, locs) - def _print_debug_wrapping( - self, line, sbpline, last_error_line, last_error_col, maxcol=None - ): - """print some debugging info if asked for.""" - if self.debug_level > 1: - msg = "{0}:{1}:{2}{3} - {4}\n" "{0}:{1}:{2}{3} + {5}" - mstr = "" if maxcol is None else ":" + str(maxcol) - msg = msg.format( - self.filename, last_error_line, last_error_col, mstr, line, sbpline - ) - print(msg, file=sys.stderr) + def _find_next_break(self, line, mincol): + if mincol >= 1: + line = line[mincol:] + if ';' not in line: + return None + maxcol = None + self.parser.lexer.input(line) + for tok in self.parser.lexer: + if tok.type == 'SEMI': + maxcol = tok.lexpos + mincol + 1 + break + return maxcol - def _parse_ctx_free(self, input, mode="exec", filename=None, logical_input=False): + def _parse_ctx_free(self, input, mode='exec'): last_error_line = last_error_col = -1 parsed = False original_error = None - greedy = False - if filename is None: - filename = self.filename - if logical_input: - beg_spaces = starting_whitespace(input) - input = input[len(beg_spaces) :] while not parsed: try: - tree = self.parser.parse( - input, - filename=filename, - mode=mode, - debug_level=(self.debug_level > 2), - ) + tree = self.parser.parse(input, + filename=self.filename, + mode=mode, + debug_level=self.debug_level) parsed = True except IndentationError as e: if original_error is None: @@ -228,95 +157,47 @@ def _parse_ctx_free(self, input, mode="exec", filename=None, logical_input=False except SyntaxError as e: if original_error is None: original_error = e - if (e.loc is None) or ( - last_error_line == e.loc.lineno - and last_error_col in (e.loc.column + 1, e.loc.column) - ): - raise original_error from None - elif last_error_line != e.loc.lineno: - original_error = e + if (e.loc is None) or 
(last_error_line == e.loc.lineno and
+                                       last_error_col in (e.loc.column + 1,
+                                                          e.loc.column)):
+                    raise original_error
                 last_error_col = e.loc.column
                 last_error_line = e.loc.lineno
                 idx = last_error_line - 1
                 lines = input.splitlines()
-                line, nlogical, idx = get_logical_line(lines, idx)
-                if nlogical > 1 and not logical_input:
-                    _, sbpline = self._parse_ctx_free(
-                        line, mode=mode, filename=filename, logical_input=True
-                    )
-                    self._print_debug_wrapping(
-                        line, sbpline, last_error_line, last_error_col, maxcol=None
-                    )
-                    replace_logical_line(lines, sbpline, idx, nlogical)
-                    last_error_col += 3
-                    input = "\n".join(lines)
-                    continue
-                if input.endswith("\n"):
-                    lines.append("")
+                line = lines[idx]
+                if input.endswith('\n'):
+                    lines.append('')
                 if len(line.strip()) == 0:
                     # whitespace only lines are not valid syntax in Python's
                     # interactive mode='single', who knew?! Just ignore them.
-                    # this might cause actual syntax errors to have bad line
-                    # numbers reported, but should only affect interactive mode
+                    # this might cause actual syntax errors to have bad line
+                    # numbers reported, but should only affect interactive mode
                     del lines[idx]
                     last_error_line = last_error_col = -1
-                    input = "\n".join(lines)
+                    input = '\n'.join(lines)
                     continue
-                if last_error_line > 1 and lines[idx - 1].rstrip()[-1:] == ":":
+                if last_error_line > 1 and lines[idx-1].rstrip()[-1:] == ':':
                     # catch non-indented blocks and raise error.
-                    prev_indent = len(lines[idx - 1]) - len(lines[idx - 1].lstrip())
+                    prev_indent = len(lines[idx-1]) - len(lines[idx-1].lstrip())
                     curr_indent = len(lines[idx]) - len(lines[idx].lstrip())
                     if prev_indent == curr_indent:
                         raise original_error
-                lexer = self.parser.lexer
-                maxcol = (
-                    None
-                    if greedy
-                    else find_next_break(line, mincol=last_error_col, lexer=lexer)
-                )
-                if not greedy and maxcol in (e.loc.column + 1, e.loc.column):
-                    # go greedy the first time if the syntax error was because
-                    # we hit an end token out of place. This usually indicates
-                    # a subshell or maybe a macro.
-                    if not balanced_parens(line, maxcol=maxcol):
-                        greedy = True
-                        maxcol = None
-                sbpline = subproc_toks(
-                    line, returnline=True, greedy=greedy, maxcol=maxcol, lexer=lexer
-                )
+                maxcol = self._find_next_break(line, last_error_col)
+                sbpline = subproc_toks(line,
+                                       returnline=True,
+                                       maxcol=maxcol,
+                                       lexer=self.parser.lexer)
                 if sbpline is None:
-                    # subprocess line had no valid tokens,
-                    if len(line.partition("#")[0].strip()) == 0:
-                        # likely because it only contained a comment.
-                        del lines[idx]
-                        last_error_line = last_error_col = -1
-                        input = "\n".join(lines)
-                        continue
-                    elif not greedy:
-                        greedy = True
-                        continue
-                    else:
-                        # or for some other syntax error
-                        raise original_error
-                elif sbpline[last_error_col:].startswith(
-                    "![!["
-                ) or sbpline.lstrip().startswith("![!["):
-                    # if we have already wrapped this in subproc tokens
-                    # and it still doesn't work, adding more won't help
-                    # anything
-                    if not greedy:
-                        greedy = True
-                        continue
-                    else:
-                        raise original_error
-                # replace the line
-                self._print_debug_wrapping(
-                    line, sbpline, last_error_line, last_error_col, maxcol=maxcol
-                )
-                replace_logical_line(lines, sbpline, idx, nlogical)
+                    # subprocess line had no valid tokens, likely because
+                    # it only contained a comment.
+ del lines[idx] + last_error_line = last_error_col = -1 + input = '\n'.join(lines) + continue + else: + lines[idx] = sbpline last_error_col += 3 - input = "\n".join(lines) - if logical_input: - input = beg_spaces + input - return tree, input + input = '\n'.join(lines) + return tree diff --git a/xonsh/foreign_shells.py b/xonsh/foreign_shells.py index 2bea06a..1f509a8 100644 --- a/xonsh/foreign_shells.py +++ b/xonsh/foreign_shells.py @@ -1,172 +1,32 @@ -# -*- coding: utf-8 -*- """Tools to help interface with foreign shells, such as Bash.""" import os import re import json import shlex -import sys -import tempfile import builtins import subprocess -import warnings -import functools -import collections.abc as cabc +from warnings import warn +from functools import lru_cache +from collections import MutableMapping, Mapping, Sequence -from xonsh.lazyasd import lazyobject from xonsh.tools import to_bool, ensure_string -from xonsh.platform import ON_WINDOWS, ON_CYGWIN, ON_MSYS -COMMAND = """{seterrprevcmd} -{prevcmd} +COMMAND = """ echo __XONSH_ENV_BEG__ {envcmd} echo __XONSH_ENV_END__ echo __XONSH_ALIAS_BEG__ {aliascmd} echo __XONSH_ALIAS_END__ -echo __XONSH_FUNCS_BEG__ -{funcscmd} -echo __XONSH_FUNCS_END__ -{postcmd} -{seterrpostcmd}""" +""".strip() -DEFAULT_BASH_FUNCSCMD = r"""# get function names from declare -declstr=$(declare -F) -read -r -a decls <<< $declstr -funcnames="" -for((n=0;n<${#decls[@]};n++)); do - if (( $(($n % 3 )) == 2 )); then - # get every 3rd entry - funcnames="$funcnames ${decls[$n]}" - fi -done - -# get functions locations: funcname lineno filename -shopt -s extdebug -namelocfilestr=$(declare -F $funcnames) -shopt -u extdebug - -# print just names and files as JSON object -read -r -a namelocfile <<< $namelocfilestr -sep=" " -namefile="{" -while IFS='' read -r line || [[ -n "$line" ]]; do - name=${line%%"$sep"*} - locfile=${line#*"$sep"} - loc=${locfile%%"$sep"*} - file=${locfile#*"$sep"} - namefile="${namefile}\"${name}\":\"${file//\\/\\\\}\"," -done <<< "$namelocfilestr" -if [[ "{" == "${namefile}" ]]; then - namefile="${namefile}}" -else - namefile="${namefile%?}}" -fi -echo $namefile""" - -DEFAULT_ZSH_FUNCSCMD = """# get function names -autoload -U is-at-least # We'll need to version check zsh -namefile="{" -for name in ${(ok)functions}; do - # force zsh to load the func in order to get the filename, - # but use +X so that it isn't executed. - autoload +X $name || continue - loc=$(whence -v $name) - loc=${(z)loc} - if is-at-least 5.2; then - file=${loc[-1]} - else - file=${loc[7,-1]} - fi - namefile="${namefile}\\"${name}\\":\\"${(Q)file:A}\\"," -done -if [[ "{" == "${namefile}" ]]; then - namefile="${namefile}}" -else - namefile="${namefile%?}}" -fi -echo ${namefile}""" - - -# mapping of shell name aliases to keys in other lookup dictionaries. 
-@lazyobject -def CANON_SHELL_NAMES(): - return { - "bash": "bash", - "/bin/bash": "bash", - "zsh": "zsh", - "/bin/zsh": "zsh", - "/usr/bin/zsh": "zsh", - "cmd": "cmd", - "cmd.exe": "cmd", - } - - -@lazyobject -def DEFAULT_ENVCMDS(): - return {"bash": "env", "zsh": "env", "cmd": "set"} - - -@lazyobject -def DEFAULT_ALIASCMDS(): - return {"bash": "alias", "zsh": "alias -L", "cmd": ""} - - -@lazyobject -def DEFAULT_FUNCSCMDS(): - return {"bash": DEFAULT_BASH_FUNCSCMD, "zsh": DEFAULT_ZSH_FUNCSCMD, "cmd": ""} - - -@lazyobject -def DEFAULT_SOURCERS(): - return {"bash": "source", "zsh": "source", "cmd": "call"} - - -@lazyobject -def DEFAULT_TMPFILE_EXT(): - return {"bash": ".sh", "zsh": ".zsh", "cmd": ".bat"} - - -@lazyobject -def DEFAULT_RUNCMD(): - return {"bash": "-c", "zsh": "-c", "cmd": "/C"} - - -@lazyobject -def DEFAULT_SETERRPREVCMD(): - return {"bash": "set -e", "zsh": "set -e", "cmd": "@echo off"} - - -@lazyobject -def DEFAULT_SETERRPOSTCMD(): - return {"bash": "", "zsh": "", "cmd": "if errorlevel 1 exit 1"} - - -@functools.lru_cache() -def foreign_shell_data( - shell, - interactive=True, - login=False, - envcmd=None, - aliascmd=None, - extra_args=(), - currenv=None, - safe=True, - prevcmd="", - postcmd="", - funcscmd=None, - sourcer=None, - use_tmpfile=False, - tmpfile_ext=None, - runcmd=None, - seterrprevcmd=None, - seterrpostcmd=None, - show=False, - dryrun=False, -): - """Extracts data from a foreign (non-xonsh) shells. Currently this gets - the environment, aliases, and functions but may be extended in the future. +@lru_cache() +def foreign_shell_data(shell, interactive=True, login=False, envcmd='env', + aliascmd='alias', extra_args=(), currenv=None, + safe=True): + """Extracts data from a foreign (non-xonsh) shells. Currently this gets + the environment and aliases, but may be extended in the future. Parameters ---------- @@ -176,143 +36,49 @@ def foreign_shell_data( Whether the shell should be run in interactive mode. login : bool, optional Whether the shell should be a login shell. - envcmd : str or None, optional + envcmd : str, optional The command to generate environment output with. - aliascmd : str or None, optional - The command to generate alias output with. + aliascmd : str, optional + The command to generate alais output with. extra_args : tuple of str, optional - Additional command line options to pass into the shell. + Addtional command line options to pass into the shell. currenv : tuple of items or None, optional Manual override for the current environment. safe : bool, optional - Flag for whether or not to safely handle exceptions and other errors. - prevcmd : str, optional - A command to run in the shell before anything else, useful for - sourcing and other commands that may require environment recovery. - postcmd : str, optional - A command to run after everything else, useful for cleaning up any - damage that the prevcmd may have caused. - funcscmd : str or None, optional - This is a command or script that can be used to determine the names - and locations of any functions that are native to the foreign shell. - This command should print *only* a JSON object that maps - function names to the filenames where the functions are defined. - If this is None, then a default script will attempted to be looked - up based on the shell name. Callable wrappers for these functions - will be returned in the aliases dictionary. - sourcer : str or None, optional - How to source a foreign shell file for purposes of calling functions - in that shell. 
If this is None, a default value will attempt to be - looked up based on the shell name. - use_tmpfile : bool, optional - This specifies if the commands are written to a tmp file or just - parsed directly to the shell - tmpfile_ext : str or None, optional - If tmpfile is True this sets specifies the extension used. - runcmd : str or None, optional - Command line switches to use when running the script, such as - -c for Bash and /C for cmd.exe. - seterrprevcmd : str or None, optional - Command that enables exit-on-error for the shell that is run at the - start of the script. For example, this is "set -e" in Bash. To disable - exit-on-error behavior, simply pass in an empty string. - seterrpostcmd : str or None, optional - Command that enables exit-on-error for the shell that is run at the end - of the script. For example, this is "if errorlevel 1 exit 1" in - cmd.exe. To disable exit-on-error behavior, simply pass in an - empty string. - show : bool, optional - Whether or not to display the script that will be run. - dryrun : bool, optional - Whether or not to actually run and process the command. - + Flag for whether or not to safely handle exceptions and other errors. Returns ------- env : dict - Dictionary of shell's environment. (None if the subproc command fails) + Dictionary of shell's environment aliases : dict - Dictionary of shell's aliases, this includes foreign function - wrappers.(None if the subproc command fails) + Dictionary of shell's alaiases. """ cmd = [shell] cmd.extend(extra_args) # needs to come here for GNU long options if interactive: - cmd.append("-i") + cmd.append('-i') if login: - cmd.append("-l") - shkey = CANON_SHELL_NAMES[shell] - envcmd = DEFAULT_ENVCMDS.get(shkey, "env") if envcmd is None else envcmd - aliascmd = DEFAULT_ALIASCMDS.get(shkey, "alias") if aliascmd is None else aliascmd - funcscmd = DEFAULT_FUNCSCMDS.get(shkey, "echo {}") if funcscmd is None else funcscmd - tmpfile_ext = ( - DEFAULT_TMPFILE_EXT.get(shkey, "sh") if tmpfile_ext is None else tmpfile_ext - ) - runcmd = DEFAULT_RUNCMD.get(shkey, "-c") if runcmd is None else runcmd - seterrprevcmd = ( - DEFAULT_SETERRPREVCMD.get(shkey, "") if seterrprevcmd is None else seterrprevcmd - ) - seterrpostcmd = ( - DEFAULT_SETERRPOSTCMD.get(shkey, "") if seterrpostcmd is None else seterrpostcmd - ) - command = COMMAND.format( - envcmd=envcmd, - aliascmd=aliascmd, - prevcmd=prevcmd, - postcmd=postcmd, - funcscmd=funcscmd, - seterrprevcmd=seterrprevcmd, - seterrpostcmd=seterrpostcmd, - ).strip() - if show: - print(command) - if dryrun: - return None, None - cmd.append(runcmd) - if not use_tmpfile: - cmd.append(command) - else: - tmpfile = tempfile.NamedTemporaryFile(suffix=tmpfile_ext, delete=False) - tmpfile.write(command.encode("utf8")) - tmpfile.close() - cmd.append(tmpfile.name) - if currenv is None and hasattr(builtins.__xonsh__, "env"): - currenv = builtins.__xonsh__.env.detype() + cmd.append('-l') + cmd.append('-c') + cmd.append(COMMAND.format(envcmd=envcmd, aliascmd=aliascmd)) + if currenv is None and hasattr(builtins, '__xonsh_env__'): + currenv = builtins.__xonsh_env__.detype() elif currenv is not None: currenv = dict(currenv) try: - s = subprocess.check_output( - cmd, - stderr=subprocess.PIPE, - env=currenv, - # start new session to avoid hangs - # (doesn't work on Cygwin though) - start_new_session=((not ON_CYGWIN) and (not ON_MSYS)), - universal_newlines=True, - ) + s = subprocess.check_output(cmd,stderr=subprocess.PIPE, env=currenv, + universal_newlines=True) except (subprocess.CalledProcessError, 
FileNotFoundError): if not safe: raise - return None, None - finally: - if use_tmpfile: - os.remove(tmpfile.name) + return {}, {} env = parse_env(s) - aliases = parse_aliases(s, shell=shell, sourcer=sourcer, extra_args=extra_args) - funcs = parse_funcs(s, shell=shell, sourcer=sourcer, extra_args=extra_args) - aliases.update(funcs) + aliases = parse_aliases(s) return env, aliases -@lazyobject -def ENV_RE(): - return re.compile("__XONSH_ENV_BEG__\n(.*)" "__XONSH_ENV_END__", flags=re.DOTALL) - - -@lazyobject -def ENV_SPLIT_RE(): - return re.compile("^([^=]+)=([^=]*|[^\n]*)$", flags=re.DOTALL | re.MULTILINE) - +ENV_RE = re.compile('__XONSH_ENV_BEG__\n(.*)__XONSH_ENV_END__', flags=re.DOTALL) def parse_env(s): """Parses the environment portion of string into a dict.""" @@ -320,400 +86,151 @@ def parse_env(s): if m is None: return {} g1 = m.group(1) - g1 = g1[:-1] if g1.endswith("\n") else g1 - env = dict(ENV_SPLIT_RE.findall(g1)) + items = [line.split('=', 1) for line in g1.splitlines() if '=' in line] + env = dict(items) return env -@lazyobject -def ALIAS_RE(): - return re.compile( - "__XONSH_ALIAS_BEG__\n(.*)" "__XONSH_ALIAS_END__", flags=re.DOTALL - ) - - -@lazyobject -def FS_EXEC_ALIAS_RE(): - return re.compile(r";|`|\$\(") - +ALIAS_RE = re.compile('__XONSH_ALIAS_BEG__\n(.*)__XONSH_ALIAS_END__', + flags=re.DOTALL) -def parse_aliases(s, shell, sourcer=None, extra_args=()): +def parse_aliases(s): """Parses the aliases portion of string into a dict.""" m = ALIAS_RE.search(s) if m is None: return {} g1 = m.group(1) - items = [ - line.split("=", 1) - for line in g1.splitlines() - if line.startswith("alias ") and "=" in line - ] + items = [line.split('=', 1) for line in g1.splitlines() if \ + line.startswith('alias ') and '=' in line] aliases = {} for key, value in items: try: key = key[6:] # lstrip 'alias ' # undo bash's weird quoting of single quotes (sh_single_quote) - value = value.replace("'\\''", "'") + value = value.replace('\'\\\'\'', '\'') # strip one single quote at the start and end of value - if value[0] == "'" and value[-1] == "'": + if value[0] == '\'' and value[-1] == '\'': value = value[1:-1] - # now compute actual alias - if FS_EXEC_ALIAS_RE.search(value) is None: - # simple list of args alias - value = shlex.split(value) - else: - # alias is more complex, use ExecAlias, but via shell - filename = "" - value = ForeignShellExecAlias( - src=value, - shell=shell, - filename=filename, - sourcer=sourcer, - extra_args=extra_args, - ) + value = shlex.split(value) except ValueError as exc: - warnings.warn( - 'could not parse alias "{0}": {1!r}'.format(key, exc), RuntimeWarning - ) + warn('could not parse alias "{0}": {1!r}'.format(key, exc), + RuntimeWarning) continue aliases[key] = value return aliases -@lazyobject -def FUNCS_RE(): - return re.compile( - "__XONSH_FUNCS_BEG__\n(.+)\n" "__XONSH_FUNCS_END__", flags=re.DOTALL - ) - - -def parse_funcs(s, shell, sourcer=None, extra_args=()): - """Parses the funcs portion of a string into a dict of callable foreign - function wrappers. - """ - m = FUNCS_RE.search(s) - if m is None: - return {} - g1 = m.group(1) - if ON_WINDOWS: - g1 = g1.replace(os.sep, os.altsep) - try: - namefiles = json.loads(g1.strip()) - except json.decoder.JSONDecodeError as exc: - msg = ( - "{0!r}\n\ncould not parse {1} functions:\n" - " s = {2!r}\n" - " g1 = {3!r}\n\n" - "Note: you may be seeing this error if you use zsh with " - "prezto. Prezto overwrites GNU coreutils functions (like echo) " - "with its own zsh functions. Please try disabling prezto." 
- ) - warnings.warn(msg.format(exc, shell, s, g1), RuntimeWarning) - return {} - sourcer = DEFAULT_SOURCERS.get(shell, "source") if sourcer is None else sourcer - funcs = {} - for funcname, filename in namefiles.items(): - if funcname.startswith("_") or not filename: - continue # skip private functions and invalid files - if not os.path.isabs(filename): - filename = os.path.abspath(filename) - wrapper = ForeignShellFunctionAlias( - funcname=funcname, - shell=shell, - sourcer=sourcer, - filename=filename, - extra_args=extra_args, - ) - funcs[funcname] = wrapper - return funcs - - -class ForeignShellBaseAlias(object): - """This class is responsible for calling foreign shell functions as if - they were aliases. This does not currently support taking stdin. - """ - - INPUT = "echo ForeignShellBaseAlias {shell} {filename} {args}\n" - - def __init__(self, shell, filename, sourcer=None, extra_args=()): - """ - Parameters - ---------- - shell : str - Name or path to shell - filename : str - Where the function is defined, path to source. - sourcer : str or None, optional - Command to source foreign files with. - extra_args : tuple of str, optional - Additional command line options to pass into the shell. - """ - sourcer = DEFAULT_SOURCERS.get(shell, "source") if sourcer is None else sourcer - self.shell = shell - self.filename = filename - self.sourcer = sourcer - self.extra_args = extra_args - - def _input_kwargs(self): - return { - "shell": self.shell, - "filename": self.filename, - "sourcer": self.sourcer, - "extra_args": self.extra_args, - } - - def __eq__(self, other): - if not hasattr(other, "_input_kwargs") or not callable(other._input_kwargs): - return NotImplemented - return self._input_kwargs() == other._input_kwargs() - - def __call__( - self, args, stdin=None, stdout=None, stderr=None, spec=None, stack=None - ): - args, streaming = self._is_streaming(args) - input = self.INPUT.format(args=" ".join(args), **self._input_kwargs()) - cmd = [self.shell] + list(self.extra_args) + ["-c", input] - env = builtins.__xonsh__.env - denv = env.detype() - if streaming: - subprocess.check_call(cmd, env=denv) - out = None - else: - out = subprocess.check_output(cmd, env=denv, stderr=subprocess.STDOUT) - out = out.decode( - encoding=env.get("XONSH_ENCODING"), - errors=env.get("XONSH_ENCODING_ERRORS"), - ) - out = out.replace("\r\n", "\n") - return out - - def __repr__(self): - return ( - self.__class__.__name__ - + "(" - + ", ".join( - [ - "{k}={v!r}".format(k=k, v=v) - for k, v in sorted(self._input_kwargs().items()) - ] - ) - + ")" - ) - - @staticmethod - def _is_streaming(args): - """Test and modify args if --xonsh-stream is present.""" - if "--xonsh-stream" not in args: - return args, False - args = list(args) - args.remove("--xonsh-stream") - return args, True - - -class ForeignShellFunctionAlias(ForeignShellBaseAlias): - """This class is responsible for calling foreign shell functions as if - they were aliases. This does not currently support taking stdin. - """ - - INPUT = '{sourcer} "{filename}"\n' "{funcname} {args}\n" - - def __init__(self, funcname, shell, filename, sourcer=None, extra_args=()): - """ - Parameters - ---------- - funcname : str - function name - shell : str - Name or path to shell - filename : str - Where the function is defined, path to source. - sourcer : str or None, optional - Command to source foreign files with. - extra_args : tuple of str, optional - Additional command line options to pass into the shell. 
- """ - super().__init__( - shell=shell, filename=filename, sourcer=sourcer, extra_args=extra_args - ) - self.funcname = funcname - - def _input_kwargs(self): - inp = super()._input_kwargs() - inp["funcname"] = self.funcname - return inp - - -class ForeignShellExecAlias(ForeignShellBaseAlias): - """Provides a callable alias for source code in a foreign shell.""" - - INPUT = "{src} {args}\n" - - def __init__( - self, - src, - shell, - filename="", - sourcer=None, - extra_args=(), - ): - """ - Parameters - ---------- - src : str - Source code in the shell language - shell : str - Name or path to shell - filename : str - Where the function is defined, path to source. - sourcer : str or None, optional - Command to source foreign files with. - extra_args : tuple of str, optional - Additional command line options to pass into the shell. - """ - super().__init__( - shell=shell, filename=filename, sourcer=sourcer, extra_args=extra_args - ) - self.src = src.strip() - - def _input_kwargs(self): - inp = super()._input_kwargs() - inp["src"] = self.src - return inp - - -@lazyobject -def VALID_SHELL_PARAMS(): - return frozenset( - [ - "shell", - "interactive", - "login", - "envcmd", - "aliascmd", - "extra_args", - "currenv", - "safe", - "prevcmd", - "postcmd", - "funcscmd", - "sourcer", - ] - ) - +VALID_SHELL_PARAMS = frozenset(['shell', 'interactive', 'login', 'envcmd', + 'aliascmd', 'extra_args', 'currenv', 'safe']) def ensure_shell(shell): """Ensures that a mapping follows the shell specification.""" - if not isinstance(shell, cabc.MutableMapping): + if not isinstance(shell, MutableMapping): shell = dict(shell) shell_keys = set(shell.keys()) if not (shell_keys <= VALID_SHELL_PARAMS): - msg = "unknown shell keys: {0}" + msg = 'unknown shell keys: {0}' raise KeyError(msg.format(shell_keys - VALID_SHELL_PARAMS)) - shell["shell"] = ensure_string(shell["shell"]).lower() - if "interactive" in shell_keys: - shell["interactive"] = to_bool(shell["interactive"]) - if "login" in shell_keys: - shell["login"] = to_bool(shell["login"]) - if "envcmd" in shell_keys: - shell["envcmd"] = ( - None if shell["envcmd"] is None else ensure_string(shell["envcmd"]) - ) - if "aliascmd" in shell_keys: - shell["aliascmd"] = ( - None if shell["aliascmd"] is None else ensure_string(shell["aliascmd"]) - ) - if "extra_args" in shell_keys and not isinstance(shell["extra_args"], tuple): - shell["extra_args"] = tuple(map(ensure_string, shell["extra_args"])) - if "currenv" in shell_keys and not isinstance(shell["currenv"], tuple): - ce = shell["currenv"] - if isinstance(ce, cabc.Mapping): + shell['shell'] = ensure_string(shell['shell']) + if 'interactive' in shell_keys: + shell['interactive'] = to_bool(shell['interactive']) + if 'login' in shell_keys: + shell['login'] = to_bool(shell['login']) + if 'envcmd' in shell_keys: + shell['envcmd'] = eunsure_string(shell['envcmd']) + if 'aliascmd' in shell_keys: + shell['aliascmd'] = eunsure_string(shell['aliascmd']) + if 'extra_args' in shell_keys and not isinstance(shell['extra_args'], tuple): + shell['extra_args'] = tuple(map(ensure_string, shell['extra_args'])) + if 'currenv' in shell_keys and not isinstance(shell['currenv'], tuple): + ce = shell['currenv'] + if isinstance(ce, Mapping): ce = tuple([(ensure_string(k), v) for k, v in ce.items()]) - elif isinstance(ce, cabc.Sequence): + elif isinstance(ce, Sequence): ce = tuple([(ensure_string(k), v) for k, v in ce]) else: - raise RuntimeError("unrecognized type for currenv") - shell["currenv"] = ce - if "safe" in shell_keys: - shell["safe"] = 
to_bool(shell["safe"]) - if "prevcmd" in shell_keys: - shell["prevcmd"] = ensure_string(shell["prevcmd"]) - if "postcmd" in shell_keys: - shell["postcmd"] = ensure_string(shell["postcmd"]) - if "funcscmd" in shell_keys: - shell["funcscmd"] = ( - None if shell["funcscmd"] is None else ensure_string(shell["funcscmd"]) - ) - if "sourcer" in shell_keys: - shell["sourcer"] = ( - None if shell["sourcer"] is None else ensure_string(shell["sourcer"]) - ) - if "seterrprevcmd" in shell_keys: - shell["seterrprevcmd"] = ( - None - if shell["seterrprevcmd"] is None - else ensure_string(shell["seterrprevcmd"]) - ) - if "seterrpostcmd" in shell_keys: - shell["seterrpostcmd"] = ( - None - if shell["seterrpostcmd"] is None - else ensure_string(shell["seterrpostcmd"]) - ) + raise RuntimeError('unrecognized type for currenv') + shell['currenv'] = ce + if 'safe' in shell_keys: + shell['safe'] = to_bool(shell['safe']) return shell -def load_foreign_envs(shells): +DEFAULT_SHELLS = ({'shell': 'bash'},) + +def _get_shells(shells=None, config=None, issue_warning=True): + if shells is not None and config is not None: + raise RuntimeError('Only one of shells and config may be non-None.') + elif shells is not None: + pass + else: + if config is None: + config = builtins.__xonsh_env__.get('XONSHCONFIG') + if os.path.isfile(config): + with open(config, 'r') as f: + conf = json.load(f) + shells = conf.get('foreign_shells', DEFAULT_SHELLS) + else: + if issue_warning: + msg = 'could not find xonsh config file ($XONSHCONFIG) at {0!r}' + warn(msg.format(config), RuntimeWarning) + shells = DEFAULT_SHELLS + return shells + + +def load_foreign_envs(shells=None, config=None, issue_warning=True): """Loads environments from foreign shells. Parameters ---------- - shells : sequence of dicts + shells : sequence of dicts, optional An iterable of dicts that can be passed into foreign_shell_data() as - keyword arguments. + keyword arguments. Not compatible with config not being None. + config : str of None, optional + Path to the static config file. Not compatible with shell not being None. + If both shell and config is None, then it will be read from the + $XONSHCONFIG environment variable. + issue_warning : bool, optional + Issues warnings if config file cannot be found. Returns ------- env : dict A dictionary of the merged environments. """ + shells = _get_shells(shells=shells, config=config, issue_warning=issue_warning) env = {} for shell in shells: shell = ensure_shell(shell) shenv, _ = foreign_shell_data(**shell) - if shenv: - env.update(shenv) + env.update(shenv) return env -def load_foreign_aliases(shells): +def load_foreign_aliases(shells=None, config=None, issue_warning=True): """Loads aliases from foreign shells. Parameters ---------- - shells : sequence of dicts + shells : sequence of dicts, optional An iterable of dicts that can be passed into foreign_shell_data() as - keyword arguments. + keyword arguments. Not compatible with config not being None. + config : str of None, optional + Path to the static config file. Not compatible with shell not being None. + If both shell and config is None, then it will be read from the + $XONSHCONFIG environment variable. + issue_warning : bool, optional + Issues warnings if config file cannot be found. Returns ------- aliases : dict A dictionary of the merged aliases. 
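+
+    Examples
+    --------
+    A minimal sketch, assuming a bash binary on $PATH whose rc files define
+    an ``ll`` alias (the alias and its expansion here are hypothetical)::
+
+        >>> aliases = load_foreign_aliases(shells=({'shell': 'bash'},))
+        >>> aliases['ll']
+        ['ls', '-l']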
""" + shells = _get_shells(shells=shells, config=config, issue_warning=issue_warning) aliases = {} - xonsh_aliases = builtins.aliases for shell in shells: shell = ensure_shell(shell) _, shaliases = foreign_shell_data(**shell) - if not builtins.__xonsh__.env.get("FOREIGN_ALIASES_OVERRIDE"): - shaliases = {} if shaliases is None else shaliases - for alias in set(shaliases) & set(xonsh_aliases): - del shaliases[alias] - if builtins.__xonsh__.env.get("XONSH_DEBUG") > 1: - print( - "aliases: ignoring alias {!r} of shell {!r} " - "which tries to override xonsh alias." - "".format(alias, shell["shell"]), - file=sys.stderr, - ) aliases.update(shaliases) return aliases diff --git a/xonsh/fs.py b/xonsh/fs.py deleted file mode 100644 index a275b12..0000000 --- a/xonsh/fs.py +++ /dev/null @@ -1,98 +0,0 @@ -""" -Backported functions to implement the PEP 519 (Adding a file system path protocol) API. -""" - -import abc -import sys -import io -import pathlib - -try: - from os import PathLike, fspath, fsencode, fsdecode -except ImportError: - - class PathLike(abc.ABC): - """Abstract base class for implementing the file system path protocol.""" - - @abc.abstractmethod - def __fspath__(self): - """Return the file system path representation of the object.""" - raise NotImplementedError - - PathLike.register(pathlib.Path) - - def fspath(path): - """Return the string representation of the path. - - If str or bytes is passed in, it is returned unchanged. If __fspath__() - returns something other than str or bytes then TypeError is raised. If - this function is given something that is not str, bytes, or os.PathLike - then TypeError is raised. - """ - if isinstance(path, (str, bytes)): - return path - - if isinstance(path, pathlib.Path): - return str(path) - - # Work from the object's type to match method resolution of other magic - # methods. - path_type = type(path) - try: - path = path_type.__fspath__(path) - except AttributeError: - if hasattr(path_type, "__fspath__"): - raise - else: - if isinstance(path, (str, bytes)): - return path - else: - raise TypeError( - "expected __fspath__() to return str or bytes, " - "not " + type(path).__name__ - ) - - raise TypeError( - "expected str, bytes or os.PathLike object, not " + path_type.__name__ - ) - - def _fscodec(): - encoding = sys.getfilesystemencoding() - if encoding == "mbcs": - errors = "strict" - else: - errors = "surrogateescape" - - def fsencode(filename): - """Encode filename (an os.PathLike, bytes, or str) to the filesystem - encoding with 'surrogateescape' error handler, return bytes unchanged. - On Windows, use 'strict' error handler if the file system encoding is - 'mbcs' (which is the default encoding). - """ - filename = fspath(filename) # Does type-checking of `filename`. - if isinstance(filename, str): - return filename.encode(encoding, errors) - else: - return filename - - def fsdecode(filename): - """Decode filename (an os.PathLike, bytes, or str) from the filesystem - encoding with 'surrogateescape' error handler, return str unchanged. On - Windows, use 'strict' error handler if the file system encoding is - 'mbcs' (which is the default encoding). - """ - filename = fspath(filename) # Does type-checking of `filename`. 
-            if isinstance(filename, bytes):
-                return filename.decode(encoding, errors)
-            else:
-                return filename
-
-        return fsencode, fsdecode
-
-    fsencode, fsdecode = _fscodec()
-    del _fscodec
-
-    def open(file, *pargs, **kwargs):
-        if isinstance(file, PathLike):
-            file = fspath(file)
-        return io.open(file, *pargs, **kwargs)
diff --git a/xonsh/history.py b/xonsh/history.py
new file mode 100644
index 0000000..00d44eb
--- /dev/null
+++ b/xonsh/history.py
@@ -0,0 +1,392 @@
+"""Implements the xonsh history object"""
+import os
+import uuid
+import time
+import builtins
+from glob import iglob
+from collections import deque, Sequence, OrderedDict
+from threading import Thread, Condition
+
+from xonsh import lazyjson
+from xonsh.tools import ensure_int_or_slice, to_history_tuple
+from xonsh import diff_history
+
+
+class HistoryGC(Thread):
+
+    def __init__(self, wait_for_shell=True, size=None, *args, **kwargs):
+        """Thread responsible for garbage collecting old history. May wait for
+        shell (and thus xonshrc to have been loaded) to start work.
+        """
+        super(HistoryGC, self).__init__(*args, **kwargs)
+        self.daemon = True
+        self.size = size
+        self.wait_for_shell = wait_for_shell
+        self.start()
+
+    def run(self):
+        while self.wait_for_shell:
+            time.sleep(0.01)
+        env = builtins.__xonsh_env__
+        if self.size is None:
+            hsize, units = env.get('XONSH_HISTORY_SIZE')
+        else:
+            hsize, units = to_history_tuple(self.size)
+        files = self.unlocked_files()
+        # flag files for removal
+        if units == 'commands':
+            n = 0
+            ncmds = 0
+            rmfiles = []
+            for ts, fcmds, f in files[::-1]:
+                if fcmds == 0:
+                    # we need to make sure that 'empty' history files don't hang around
+                    rmfiles.append((ts, fcmds, f))
+                if ncmds + fcmds > hsize:
+                    break
+                ncmds += fcmds
+                n += 1
+            rmfiles += files[:-n]
+        elif units == 'files':
+            rmfiles = files[:-hsize] if len(files) > hsize else []
+        elif units == 's':
+            now = time.time()
+            rmfiles = []
+            for ts, _, f in files:
+                if (now - ts) < hsize:
+                    break
+                rmfiles.append((None, None, f))
+        elif units == 'b':
+            n = 0
+            nbytes = 0
+            for _, _, f in files[::-1]:
+                fsize = os.stat(f).st_size
+                if nbytes + fsize > hsize:
+                    break
+                nbytes += fsize
+                n += 1
+            rmfiles = files[:-n]
+        else:
+            raise ValueError('Units of {0!r} not understood'.format(units))
+        # finally, clean up files
+        for _, _, f in rmfiles:
+            try:
+                os.remove(f)
+            except OSError:
+                pass
+
+    def unlocked_files(self):
+        """Finds the history files and returns the ones that are unlocked,
+        sorted by last closed time, as (timestamp, ncmds, filename) tuples.
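+
+        For example (paths and numbers here are hypothetical), the result
+        looks like::
+
+            [(1462309200.5, 12, '/home/me/.local/share/xonsh/xonsh-aaaa.json'),
+             (1462395600.1, 30, '/home/me/.local/share/xonsh/xonsh-bbbb.json')]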
+ """ + xdd = os.path.abspath(builtins.__xonsh_env__.get('XONSH_DATA_DIR')) + fs = [f for f in iglob(os.path.join(xdd, 'xonsh-*.json'))] + files = [] + for f in fs: + try: + lj = lazyjson.LazyJSON(f, reopen=False) + if lj['locked']: + continue + # info: closing timestamp, number of commands, filename + files.append((lj['ts'][1], len(lj.sizes['cmds']) - 1, f)) + lj.close() + except (IOError, OSError, ValueError): + continue + files.sort() + return files + + +class HistoryFlusher(Thread): + + def __init__(self, filename, buffer, queue, cond, at_exit=False, *args, **kwargs): + """Thread for flushing history.""" + super(HistoryFlusher, self).__init__(*args, **kwargs) + self.filename = filename + self.buffer = buffer + self.queue = queue + queue.append(self) + self.cond = cond + self.at_exit = at_exit + if at_exit: + self.dump() + queue.popleft() + else: + self.start() + + def run(self): + with self.cond: + self.cond.wait_for(self.i_am_at_the_front) + self.dump() + self.queue.popleft() + + def i_am_at_the_front(self): + """Tests if the flusher is at the front of the queue.""" + return self is self.queue[0] + + def dump(self): + with open(self.filename, 'r', newline='\n') as f: + hist = lazyjson.LazyJSON(f).load() + hist['cmds'].extend(self.buffer) + if self.at_exit: + hist['ts'][1] = time.time() # apply end time + hist['locked'] = False + with open(self.filename, 'w', newline='\n') as f: + lazyjson.dump(hist, f, sort_keys=True) + + +class CommandField(Sequence): + + def __init__(self, field, hist, default=None): + """Represents a field in the 'cmds' portion of history. Will query the buffer + for the relevant data, if possible. Otherwise it will lazily acquire data from + the file. + + Parameters + ---------- + field : str + The name of the field to query. + hist : History object + The history object to query. + default : optional + The default value to return if key is not present. + """ + self.field = field + self.hist = hist + self.default = default + + def __len__(self): + return len(self.hist) + + def __getitem__(self, key): + size = len(self) + if isinstance(key, slice): + return [self[i] for i in range(*key.indices(size))] + elif not isinstance(key, int): + raise IndexError('CommandField may only be indexed by int or slice.') + # now we know we have an int + key = size + key if key < 0 else key # ensure key is non-negative + bufsize = len(self.hist.buffer) + if size - bufsize <= key: # key is in buffer + return self.hist.buffer[key + bufsize - size].get(self.field, self.default) + # now we know we have to go into the file + queue = self.hist._queue + queue.append(self) + with self.hist._cond: + self.hist._cond.wait_for(self.i_am_at_the_front) + with open(self.hist.filename, 'r', newline='\n') as f: + lj = lazyjson.LazyJSON(f, reopen=False) + rtn = lj['cmds'][key].get(self.field, self.default) + if isinstance(rtn, lazyjson.Node): + rtn = rtn.load() + queue.popleft() + return rtn + + def i_am_at_the_front(self): + """Tests if the command field is at the front of the queue.""" + return self is self.hist._queue[0] + + +class History(object): + + def __init__(self, filename=None, sessionid=None, buffersize=100, gc=True, **meta): + """Represents a xonsh session's history as an in-memory buffer that is + periodically flushed to disk. + + Parameters + ---------- + filename : str, optional + Location of history file, defaults to + ``$XONSH_DATA_DIR/xonsh-{sessionid}.json``. + sessionid : int, uuid, str, optional + Current session identifier, will generate a new sessionid if not set. 
+        buffersize : int, optional
+            Maximum buffersize in memory.
+        meta : optional
+            Top-level metadata to store along with the history. The kwargs 'cmds' and
+            'sessionid' are not allowed and will be overwritten.
+        gc : bool, optional
+            Run garbage collector flag.
+        """
+        self.sessionid = sid = uuid.uuid4() if sessionid is None else sessionid
+        if filename is None:
+            self.filename = os.path.join(builtins.__xonsh_env__.get('XONSH_DATA_DIR'),
+                                         'xonsh-{0}.json'.format(sid))
+        else:
+            self.filename = filename
+        self.buffer = []
+        self.buffersize = buffersize
+        self._queue = deque()
+        self._cond = Condition()
+        self._len = 0
+        self.last_cmd_out = None
+        self.last_cmd_rtn = None
+        meta['cmds'] = []
+        meta['sessionid'] = str(sid)
+        with open(self.filename, 'w', newline='\n') as f:
+            lazyjson.dump(meta, f, sort_keys=True)
+        self.gc = HistoryGC() if gc else None
+        # command fields that are known
+        self.tss = CommandField('ts', self)
+        self.inps = CommandField('inp', self)
+        self.outs = CommandField('out', self)
+        self.rtns = CommandField('rtn', self)
+
+    def __len__(self):
+        return self._len
+
+    def append(self, cmd):
+        """Appends command to history. Will periodically flush the history to file.
+
+        Parameters
+        ----------
+        cmd : dict
+            Command dictionary that should be added to the ordered history.
+
+        Returns
+        -------
+        hf : HistoryFlusher or None
+            The thread that was spawned to flush history
+        """
+        self.buffer.append(cmd)
+        self._len += 1  # must come before flushing
+        if len(self.buffer) >= self.buffersize:
+            hf = self.flush()
+        else:
+            hf = None
+        return hf
+
+    def flush(self, at_exit=False):
+        """Flushes the current command buffer to disk.
+
+        Parameters
+        ----------
+        at_exit : bool, optional
+            Whether the HistoryFlusher should act as a thread in the background,
+            or execute immediately and block.
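+            When True, the dump happens synchronously on the calling thread
+            and also records the session end time and unlocks the history
+            file (see HistoryFlusher.dump above).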
+ + Returns + ------- + hf : HistoryFlusher or None + The thread that was spawned to flush history + """ + if len(self.buffer) == 0: + return + hf = HistoryFlusher(self.filename, tuple(self.buffer), self._queue, self._cond, + at_exit=at_exit) + self.buffer.clear() + return hf + +# +# Interface to History +# + +_HIST_PARSER = None + +def _create_parser(): + global _HIST_PARSER + if _HIST_PARSER is not None: + return _HIST_PARSER + from argparse import ArgumentParser + p = ArgumentParser(prog='history', + description='Tools for dealing with history') + subp = p.add_subparsers(title='action', dest='action') + # show action + show = subp.add_parser('show', help='displays current history, default action') + show.add_argument('-r', dest='reverse', default=False, action='store_true', + help='reverses the direction') + show.add_argument('n', nargs='?', default=None, + help='displays n current history entries, n may be an int or use ' + 'Python slice notation') + # id + idp = subp.add_parser('id', help='displays the current session id') + # file + fp = subp.add_parser('file', help='displays the current history filename') + # info + info = subp.add_parser('info', help='displays information about the current history') + info.add_argument('--json', dest='json', default=False, action='store_true', + help='print in JSON format') + # diff + diff = subp.add_parser('diff', help='diffs two xonsh history files') + diff_history._create_parser(p=diff) + # replay, dynamically + from xonsh import replay + rp = subp.add_parser('replay', help='replays a xonsh history file') + replay._create_parser(p=rp) + _MAIN_ACTIONS['replay'] = replay._main_action + # gc + gcp = subp.add_parser('gc', help='launches a new history garbage collector') + gcp.add_argument('--size', nargs=2, dest='size', default=None, + help='next two arguments represent the history size and units, ' + 'eg "--size 8128 commands"') + bgcp = gcp.add_mutually_exclusive_group() + bgcp.add_argument('--blocking', dest='blocking', default=True, action='store_true', + help='ensures that the gc blocks the main thread, default True') + bgcp.add_argument('--non-blocking', dest='blocking', action='store_false', + help='makes the gc non-blocking, and thus return sooner') + # set and return + _HIST_PARSER = p + return p + + +def _show(ns, hist): + idx = ensure_int_or_slice(ns.n) + if len(hist) == 0: + return + inps = hist.inps[idx] + if isinstance(idx, int): + inps = [inps] + indices = [idx if idx >= 0 else len(hist) + idx] + else: + indices = list(range(*idx.indices(len(hist)))) + ndigits = len(str(indices[-1])) + indent = ' '*(ndigits + 3) + if ns.reverse: + indices = reversed(indices) + inps = reversed(inps) + for i, inp in zip(indices, inps): + lines = inp.splitlines() + lines[0] = ' {0:>{1}} {2}'.format(i, ndigits, lines[0]) + lines[1:] = [indent + x for x in lines[1:]] + print('\n'.join(lines)) + + +def _info(ns, hist): + data = OrderedDict() + data['sessionid'] = str(hist.sessionid) + data['filename'] = hist.filename + data['length'] = len(hist) + data['buffersize'] = hist.buffersize + data['bufferlength'] = len(hist.buffer) + if ns.json: + import json + s = json.dumps(data) + print(s) + else: + lines = ['{0}: {1}'.format(k, v) for k, v in data.items()] + print('\n'.join(lines)) + + +def _gc(ns, hist): + hist.gc = gc = HistoryGC(wait_for_shell=False, size=ns.size) + if ns.blocking: + while gc.is_alive(): + continue + + +_MAIN_ACTIONS = { + 'show': _show, + 'id': lambda ns, hist: print(hist.sessionid), + 'file': lambda ns, hist: print(hist.filename), + 
'info': _info,
+    'diff': diff_history._main_action,
+    'gc': _gc,
+    }
+
+def main(args=None, stdin=None):
+    """This acts as a main function for history command line interfaces."""
+    hist = builtins.__xonsh_history__
+    parser = _create_parser()
+    ns = parser.parse_args(args)
+    if ns.action is None:  # apply default action
+        ns = parser.parse_args(['show'] + list(args or []))
+    _MAIN_ACTIONS[ns.action](ns, hist)
diff --git a/xonsh/history/__init__.py b/xonsh/history/__init__.py
deleted file mode 100644
index 7be6415..0000000
--- a/xonsh/history/__init__.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# amalgamate exclude
-import os as _os
-
-if _os.getenv("XONSH_DEBUG", ""):
-    pass
-else:
-    import sys as _sys
-
-    try:
-        from xonsh.history import __amalgam__
-
-        base = __amalgam__
-        _sys.modules["xonsh.history.base"] = __amalgam__
-        dummy = __amalgam__
-        _sys.modules["xonsh.history.dummy"] = __amalgam__
-        json = __amalgam__
-        _sys.modules["xonsh.history.json"] = __amalgam__
-        sqlite = __amalgam__
-        _sys.modules["xonsh.history.sqlite"] = __amalgam__
-        main = __amalgam__
-        _sys.modules["xonsh.history.main"] = __amalgam__
-        del __amalgam__
-    except ImportError:
-        pass
-    del _sys
-del _os
-# amalgamate end
diff --git a/xonsh/history/base.py b/xonsh/history/base.py
deleted file mode 100644
index 1dde185..0000000
--- a/xonsh/history/base.py
+++ /dev/null
@@ -1,155 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Base class of Xonsh History backends."""
-import types
-import uuid
-
-
-class HistoryEntry(types.SimpleNamespace):
-    """Represent a command in history.
-
-    Attributes
-    ----------
-    cmd: str
-        The command as typed by the user, including newlines
-    out: str
-        The output of the command, if xonsh is configured to save it
-    rtn: int
-        The return of the command (ie, 0 on success)
-    ts: two-tuple of floats
-        The timestamps of when the command started and finished, including
-        fractions.
-
-    """
-
-
-class History:
-    """Xonsh history backend base class.
-
-    History objects should be created via a subclass of History.
-
-    Indexing
-    --------
-    History acts like a sequence that can be indexed to return
-    ``HistoryEntry`` objects.
-
-    Note that the most recent command is the last item in history.
-
-    Attributes
-    ----------
-    rtns : sequence of ints
-        The return of the command (ie, 0 on success)
-    inps : sequence of strings
-        The command as typed by the user, including newlines
-    tss : sequence of two-tuples of floats
-        The timestamps of when the command started and finished, including
-        fractions
-    outs : sequence of strings
-        The output of the command, if xonsh is configured to save it
-    gc : A garbage collector or None
-        The garbage collector
-
-    In all of these sequences, index 0 is the oldest and -1 (the last item)
-    is the newest.
-    """
-
-    def __init__(self, sessionid=None, **kwargs):
-        """Represents a xonsh session's history.
-
-        Parameters
-        ----------
-        sessionid : int, uuid, str, optional
-            Current session identifier, will generate a new sessionid if not
-            set.
- """ - self.sessionid = uuid.uuid4() if sessionid is None else sessionid - self.gc = None - self.buffer = None - self.filename = None - self.inps = None - self.rtns = None - self.tss = None - self.outs = None - self.last_cmd_rtn = None - self.last_cmd_out = None - - def __len__(self): - """Return the number of items in current session.""" - return len(list(self.items())) - - def __getitem__(self, item): - """Retrieve history entries, see ``History`` docs for more info.""" - if isinstance(item, int): - if item >= len(self): - raise IndexError("history index out of range") - return HistoryEntry( - cmd=self.inps[item], - out=self.outs[item], - rtn=self.rtns[item], - ts=self.tss[item], - ) - elif isinstance(item, slice): - cmds = self.inps[item] - outs = self.outs[item] - rtns = self.rtns[item] - tss = self.tss[item] - return [ - HistoryEntry(cmd=c, out=o, rtn=r, ts=t) - for c, o, r, t in zip(cmds, outs, rtns, tss) - ] - else: - raise TypeError( - "history indices must be integers " - "or slices, not {}".format(type(item)) - ) - - def __setitem__(self, *args): - raise PermissionError( - "You cannot change history! " "you can create new though." - ) - - def append(self, cmd): - """Append a command item into history. - - Parameters - ---------- - cmd: dict - This dict contains information about the command that is to be - added to the history list. It should contain the keys ``inp``, - ``rtn`` and ``ts``. These key names mirror the same names defined - as instance variables in the ``HistoryEntry`` class. - """ - pass - - def flush(self, **kwargs): - """Flush the history items to disk from a buffer.""" - pass - - def items(self, newest_first=False): - """Get history items of current session.""" - raise NotImplementedError - - def all_items(self, newest_first=False): - """Get all history items.""" - raise NotImplementedError - - def info(self): - """A collection of information about the shell history. - - Returns - ------- - dict or collections.OrderedDict - Contains history information as str key pairs. - """ - raise NotImplementedError - - def run_gc(self, size=None, blocking=True): - """Run the garbage collector. - - Parameters - ---------- - size: None or tuple of a int and a string - Determines the size and units of what would be allowed to remain. - blocking: bool - If set blocking, then wait until gc action finished. 
- """ - pass diff --git a/xonsh/history/dummy.py b/xonsh/history/dummy.py deleted file mode 100644 index a38912f..0000000 --- a/xonsh/history/dummy.py +++ /dev/null @@ -1,23 +0,0 @@ -# -*- coding: utf-8 -*- -"""Implements the xonsh history backend.""" -import collections -from xonsh.history.base import History - - -class DummyHistory(History): - """A dummy implement of history backend.""" - - def append(self, cmd): - pass - - def items(self, newest_first=False): - yield {"inp": "dummy in action", "ts": 1464652800, "ind": 0} - - def all_items(self, newest_first=False): - return self.items(newest_first=newest_first) - - def info(self): - data = collections.OrderedDict() - data["backend"] = "dummy" - data["sessionid"] = str(self.sessionid) - return data diff --git a/xonsh/history/json.py b/xonsh/history/json.py deleted file mode 100644 index 50b6326..0000000 --- a/xonsh/history/json.py +++ /dev/null @@ -1,436 +0,0 @@ -# -*- coding: utf-8 -*- -"""Implements JSON version of xonsh history backend.""" -import os -import sys -import time -import json -import builtins -import collections -import threading -import collections.abc as cabc - -from xonsh.history.base import History -import xonsh.tools as xt -import xonsh.lazyjson as xlj -import xonsh.xoreutils.uptime as uptime - - -def _xhj_gc_commands_to_rmfiles(hsize, files): - """Return the history files to remove to get under the command limit.""" - rmfiles = [] - n = 0 - ncmds = 0 - for ts, fcmds, f in files[::-1]: - if fcmds == 0: - # we need to make sure that 'empty' history files don't hang around - rmfiles.append((ts, fcmds, f)) - if ncmds + fcmds > hsize: - break - ncmds += fcmds - n += 1 - rmfiles += files[:-n] - return rmfiles - - -def _xhj_gc_files_to_rmfiles(hsize, files): - """Return the history files to remove to get under the file limit.""" - rmfiles = files[:-hsize] if len(files) > hsize else [] - return rmfiles - - -def _xhj_gc_seconds_to_rmfiles(hsize, files): - """Return the history files to remove to get under the age limit.""" - rmfiles = [] - now = time.time() - for ts, _, f in files: - if (now - ts) < hsize: - break - rmfiles.append((None, None, f)) - return rmfiles - - -def _xhj_gc_bytes_to_rmfiles(hsize, files): - """Return the history files to remove to get under the byte limit.""" - rmfiles = [] - n = 0 - nbytes = 0 - for _, _, f in files[::-1]: - fsize = os.stat(f).st_size - if nbytes + fsize > hsize: - break - nbytes += fsize - n += 1 - rmfiles = files[:-n] - return rmfiles - - -def _xhj_get_history_files(sort=True, newest_first=False): - """Find and return the history files. Optionally sort files by - modify time. - """ - data_dir = builtins.__xonsh__.env.get("XONSH_DATA_DIR") - data_dir = xt.expanduser_abs_path(data_dir) - try: - files = [ - os.path.join(data_dir, f) - for f in os.listdir(data_dir) - if f.startswith("xonsh-") and f.endswith(".json") - ] - except OSError: - files = [] - if builtins.__xonsh__.env.get("XONSH_DEBUG"): - xt.print_exception("Could not collect xonsh history files.") - if sort: - files.sort(key=lambda x: os.path.getmtime(x), reverse=newest_first) - return files - - -class JsonHistoryGC(threading.Thread): - """Shell history garbage collection.""" - - def __init__(self, wait_for_shell=True, size=None, *args, **kwargs): - """Thread responsible for garbage collecting old history. - - May wait for shell (and for xonshrc to have been loaded) to start work. 
- """ - super().__init__(*args, **kwargs) - self.daemon = True - self.size = size - self.wait_for_shell = wait_for_shell - self.start() - self.gc_units_to_rmfiles = { - "commands": _xhj_gc_commands_to_rmfiles, - "files": _xhj_gc_files_to_rmfiles, - "s": _xhj_gc_seconds_to_rmfiles, - "b": _xhj_gc_bytes_to_rmfiles, - } - - def run(self): - while self.wait_for_shell: - time.sleep(0.01) - env = builtins.__xonsh__.env # pylint: disable=no-member - if self.size is None: - hsize, units = env.get("XONSH_HISTORY_SIZE") - else: - hsize, units = xt.to_history_tuple(self.size) - files = self.files(only_unlocked=True) - rmfiles_fn = self.gc_units_to_rmfiles.get(units) - if rmfiles_fn is None: - raise ValueError("Units type {0!r} not understood".format(units)) - - for _, _, f in rmfiles_fn(hsize, files): - try: - os.remove(f) - except OSError: - pass - - def files(self, only_unlocked=False): - """Find and return the history files. Optionally locked files may be - excluded. - - This is sorted by the last closed time. Returns a list of - (timestamp, number of cmds, file name) tuples. - """ - # pylint: disable=no-member - env = getattr(builtins, "__xonsh__.env", None) - if env is None: - return [] - boot = uptime.boottime() - fs = _xhj_get_history_files(sort=False) - files = [] - for f in fs: - try: - if os.path.getsize(f) == 0: - # collect empty files (for gc) - files.append((time.time(), 0, f)) - continue - lj = xlj.LazyJSON(f, reopen=False) - if lj["locked"] and lj["ts"][0] < boot: - # computer was rebooted between when this history was created - # and now and so this history should be unlocked. - hist = lj.load() - lj.close() - hist["locked"] = False - with open(f, "w", newline="\n") as fp: - xlj.ljdump(hist, fp, sort_keys=True) - lj = xlj.LazyJSON(f, reopen=False) - if only_unlocked and lj["locked"]: - continue - # info: closing timestamp, number of commands, filename - files.append((lj["ts"][1] or lj["ts"][0], len(lj.sizes["cmds"]) - 1, f)) - lj.close() - except (IOError, OSError, ValueError): - continue - files.sort() - return files - - -class JsonHistoryFlusher(threading.Thread): - """Flush shell history to disk periodically.""" - - def __init__(self, filename, buffer, queue, cond, at_exit=False, *args, **kwargs): - """Thread for flushing history.""" - super(JsonHistoryFlusher, self).__init__(*args, **kwargs) - self.filename = filename - self.buffer = buffer - self.queue = queue - queue.append(self) - self.cond = cond - self.at_exit = at_exit - if at_exit: - self.dump() - queue.popleft() - else: - self.start() - - def run(self): - with self.cond: - self.cond.wait_for(self.i_am_at_the_front) - self.dump() - self.queue.popleft() - - def i_am_at_the_front(self): - """Tests if the flusher is at the front of the queue.""" - return self is self.queue[0] - - def dump(self): - """Write the cached history to external storage.""" - opts = builtins.__xonsh__.env.get("HISTCONTROL") - last_inp = None - cmds = [] - for cmd in self.buffer: - if "ignoredups" in opts and cmd["inp"] == last_inp: - # Skipping dup cmd - continue - if "ignoreerr" in opts and cmd["rtn"] != 0: - # Skipping failed cmd - continue - cmds.append(cmd) - last_inp = cmd["inp"] - with open(self.filename, "r", newline="\n") as f: - hist = xlj.LazyJSON(f).load() - load_hist_len = len(hist["cmds"]) - hist["cmds"].extend(cmds) - if self.at_exit: - hist["ts"][1] = time.time() # apply end time - hist["locked"] = False - if not builtins.__xonsh__.env.get("XONSH_STORE_STDOUT", False): - [cmd.pop("out") for cmd in hist["cmds"][load_hist_len:] if "out" in 
cmd] - with open(self.filename, "w", newline="\n") as f: - xlj.ljdump(hist, f, sort_keys=True) - - -class JsonCommandField(cabc.Sequence): - """A field in the 'cmds' portion of history.""" - - def __init__(self, field, hist, default=None): - """Represents a field in the 'cmds' portion of history. - - Will query the buffer for the relevant data, if possible. Otherwise it - will lazily acquire data from the file. - - Parameters - ---------- - field : str - The name of the field to query. - hist : History object - The history object to query. - default : optional - The default value to return if key is not present. - """ - self.field = field - self.hist = hist - self.default = default - - def __len__(self): - return len(self.hist) - - def __getitem__(self, key): - size = len(self) - if isinstance(key, slice): - return [self[i] for i in range(*key.indices(size))] - elif not isinstance(key, int): - raise IndexError("JsonCommandField may only be indexed by int or slice.") - elif size == 0: - raise IndexError("JsonCommandField is empty.") - # now we know we have an int - key = size + key if key < 0 else key # ensure key is non-negative - bufsize = len(self.hist.buffer) - if size - bufsize <= key: # key is in buffer - return self.hist.buffer[key + bufsize - size].get(self.field, self.default) - # now we know we have to go into the file - queue = self.hist._queue - queue.append(self) - with self.hist._cond: - self.hist._cond.wait_for(self.i_am_at_the_front) - with open(self.hist.filename, "r", newline="\n") as f: - lj = xlj.LazyJSON(f, reopen=False) - rtn = lj["cmds"][key].get(self.field, self.default) - if isinstance(rtn, xlj.LJNode): - rtn = rtn.load() - queue.popleft() - return rtn - - def i_am_at_the_front(self): - """Tests if the command field is at the front of the queue.""" - return self is self.hist._queue[0] - - -class JsonHistory(History): - """Xonsh history backend implemented with JSON files. - - JsonHistory implements two extra actions: ``diff``, and ``replay``. - """ - - def __init__(self, filename=None, sessionid=None, buffersize=100, gc=True, **meta): - """Represents a xonsh session's history as an in-memory buffer that is - periodically flushed to disk. - - Parameters - ---------- - filename : str, optional - Location of history file, defaults to - ``$XONSH_DATA_DIR/xonsh-{sessionid}.json``. - sessionid : int, uuid, str, optional - Current session identifier, will generate a new sessionid if not - set. - buffersize : int, optional - Maximum buffersize in memory. - meta : optional - Top-level metadata to store along with the history. The kwargs - 'cmds' and 'sessionid' are not allowed and will be overwritten. - gc : bool, optional - Run garbage collector flag. 
- """ - super().__init__(sessionid=sessionid, **meta) - if filename is None: - # pylint: disable=no-member - data_dir = builtins.__xonsh__.env.get("XONSH_DATA_DIR") - data_dir = os.path.expanduser(data_dir) - self.filename = os.path.join( - data_dir, "xonsh-{0}.json".format(self.sessionid) - ) - else: - self.filename = filename - self.buffer = [] - self.buffersize = buffersize - self._queue = collections.deque() - self._cond = threading.Condition() - self._len = 0 - self.last_cmd_out = None - self.last_cmd_rtn = None - meta["cmds"] = [] - meta["sessionid"] = str(self.sessionid) - with open(self.filename, "w", newline="\n") as f: - xlj.ljdump(meta, f, sort_keys=True) - self.gc = JsonHistoryGC() if gc else None - # command fields that are known - self.tss = JsonCommandField("ts", self) - self.inps = JsonCommandField("inp", self) - self.outs = JsonCommandField("out", self) - self.rtns = JsonCommandField("rtn", self) - - def __len__(self): - return self._len - - def append(self, cmd): - """Appends command to history. Will periodically flush the history to file. - - Parameters - ---------- - cmd : dict - This dict contains information about the command that is to be - added to the history list. It should contain the keys ``inp``, - ``rtn`` and ``ts``. These key names mirror the same names defined - as instance variables in the ``HistoryEntry`` class. - - Returns - ------- - hf : JsonHistoryFlusher or None - The thread that was spawned to flush history - """ - self.buffer.append(cmd) - self._len += 1 # must come before flushing - if len(self.buffer) >= self.buffersize: - hf = self.flush() - else: - hf = None - return hf - - def flush(self, at_exit=False): - """Flushes the current command buffer to disk. - - Parameters - ---------- - at_exit : bool, optional - Whether the JsonHistoryFlusher should act as a thread in the - background, or execute immediately and block. - - Returns - ------- - hf : JsonHistoryFlusher or None - The thread that was spawned to flush history - """ - if len(self.buffer) == 0: - return - hf = JsonHistoryFlusher( - self.filename, tuple(self.buffer), self._queue, self._cond, at_exit=at_exit - ) - self.buffer.clear() - return hf - - def items(self, newest_first=False): - """Display history items of current session.""" - if newest_first: - items = zip(reversed(self.inps), reversed(self.tss)) - else: - items = zip(self.inps, self.tss) - for item, tss in items: - yield {"inp": item.rstrip(), "ts": tss[0]} - - def all_items(self, newest_first=False, **kwargs): - """ - Returns all history as found in XONSH_DATA_DIR. 
- - yield format: {'inp': cmd, 'rtn': 0, ...} - """ - while self.gc and self.gc.is_alive(): - time.sleep(0.011) # gc sleeps for 0.01 secs, sleep a beat longer - for f in _xhj_get_history_files(newest_first=newest_first): - try: - json_file = xlj.LazyJSON(f, reopen=False) - except ValueError: - # Invalid json file - continue - try: - commands = json_file.load()["cmds"] - except json.decoder.JSONDecodeError: - # file is corrupted somehow - if builtins.__xonsh__.env.get("XONSH_DEBUG") > 0: - msg = "xonsh history file {0!r} is not valid JSON" - print(msg.format(f), file=sys.stderr) - continue - if newest_first: - commands = reversed(commands) - for c in commands: - yield {"inp": c["inp"].rstrip(), "ts": c["ts"][0]} - # all items should also include session items - yield from self.items() - - def info(self): - data = collections.OrderedDict() - data["backend"] = "json" - data["sessionid"] = str(self.sessionid) - data["filename"] = self.filename - data["length"] = len(self) - data["buffersize"] = self.buffersize - data["bufferlength"] = len(self.buffer) - envs = builtins.__xonsh__.env - data["gc options"] = envs.get("XONSH_HISTORY_SIZE") - return data - - def run_gc(self, size=None, blocking=True): - self.gc = JsonHistoryGC(wait_for_shell=False, size=size) - if blocking: - while self.gc.is_alive(): - continue diff --git a/xonsh/history/main.py b/xonsh/history/main.py deleted file mode 100644 index 7c34d21..0000000 --- a/xonsh/history/main.py +++ /dev/null @@ -1,417 +0,0 @@ -# -*- coding: utf-8 -*- -"""Main entry points of the xonsh history.""" -import argparse -import builtins -import datetime -import functools -import json -import os -import sys - -from xonsh.history.base import History -from xonsh.history.dummy import DummyHistory -from xonsh.history.json import JsonHistory -from xonsh.history.sqlite import SqliteHistory -import xonsh.diff_history as xdh -import xonsh.lazyasd as xla -import xonsh.tools as xt - -HISTORY_BACKENDS = {"dummy": DummyHistory, "json": JsonHistory, "sqlite": SqliteHistory} - - -def construct_history(**kwargs): - """Construct the history backend object.""" - env = builtins.__xonsh__.env - backend = env.get("XONSH_HISTORY_BACKEND") - if isinstance(backend, str) and backend in HISTORY_BACKENDS: - kls_history = HISTORY_BACKENDS[backend] - elif xt.is_class(backend): - kls_history = backend - elif isinstance(backend, History): - return backend - else: - print( - "Unknown history backend: {}. Using JSON version".format(backend), - file=sys.stderr, - ) - kls_history = JsonHistory - return kls_history(**kwargs) - - -def _xh_session_parser(hist=None, newest_first=False, **kwargs): - """Returns history items of current session.""" - if hist is None: - hist = builtins.__xonsh__.history - return hist.items() - - -def _xh_all_parser(hist=None, newest_first=False, **kwargs): - """Returns all history items.""" - if hist is None: - hist = builtins.__xonsh__.history - return hist.all_items(newest_first=newest_first) - - -def _xh_find_histfile_var(file_list, default=None): - """Return the path of the history file - from the value of the envvar HISTFILE. 
- """ - for f in file_list: - f = xt.expanduser_abs_path(f) - if not os.path.isfile(f): - continue - with open(f, "r") as rc_file: - for line in rc_file: - if line.startswith("HISTFILE="): - hist_file = line.split("=", 1)[1].strip("'\"\n") - hist_file = xt.expanduser_abs_path(hist_file) - if os.path.isfile(hist_file): - return hist_file - else: - if default: - default = xt.expanduser_abs_path(default) - if os.path.isfile(default): - return default - - -def _xh_bash_hist_parser(location=None, **kwargs): - """Yield commands from bash history file""" - if location is None: - location = _xh_find_histfile_var( - [os.path.join("~", ".bashrc"), os.path.join("~", ".bash_profile")], - os.path.join("~", ".bash_history"), - ) - if location: - with open(location, "r", errors="backslashreplace") as bash_hist: - for ind, line in enumerate(bash_hist): - yield {"inp": line.rstrip(), "ts": 0.0, "ind": ind} - else: - print("No bash history file", file=sys.stderr) - - -def _xh_zsh_hist_parser(location=None, **kwargs): - """Yield commands from zsh history file""" - if location is None: - location = _xh_find_histfile_var( - [os.path.join("~", ".zshrc"), os.path.join("~", ".zprofile")], - os.path.join("~", ".zsh_history"), - ) - if location: - with open(location, "r", errors="backslashreplace") as zsh_hist: - for ind, line in enumerate(zsh_hist): - if line.startswith(":"): - try: - start_time, command = line.split(";", 1) - except ValueError: - # Invalid history entry - continue - try: - start_time = float(start_time.split(":")[1]) - except ValueError: - start_time = 0.0 - yield {"inp": command.rstrip(), "ts": start_time, "ind": ind} - else: - yield {"inp": line.rstrip(), "ts": 0.0, "ind": ind} - - else: - print("No zsh history file found", file=sys.stderr) - - -def _xh_filter_ts(commands, start_time, end_time): - """Yield only the commands between start and end time.""" - for cmd in commands: - if start_time <= cmd["ts"] < end_time: - yield cmd - - -def _xh_get_history( - session="session", - *, - slices=None, - datetime_format=None, - start_time=None, - end_time=None, - location=None -): - """Get the requested portion of shell history. - - Parameters - ---------- - session: {'session', 'all', 'xonsh', 'bash', 'zsh'} - The history session to get. - slices : list of slice-like objects, optional - Get only portions of history. - start_time, end_time: float, optional - Filter commands by timestamp. - location: string, optional - The history file location (bash or zsh) - - Returns - ------- - generator - A filtered list of commands - """ - cmds = [] - for i, item in enumerate(_XH_HISTORY_SESSIONS[session](location=location)): - item["ind"] = i - cmds.append(item) - if slices: - # transform/check all slices - slices = [xt.ensure_slice(s) for s in slices] - cmds = xt.get_portions(cmds, slices) - if start_time or end_time: - if start_time is None: - start_time = 0.0 - else: - start_time = xt.ensure_timestamp(start_time, datetime_format) - if end_time is None: - end_time = float("inf") - else: - end_time = xt.ensure_timestamp(end_time, datetime_format) - cmds = _xh_filter_ts(cmds, start_time, end_time) - return cmds - - -def _xh_show_history(hist, ns, stdout=None, stderr=None): - """Show the requested portion of shell history. - Accepts same parameters with `_xh_get_history`. 
- """ - try: - commands = _xh_get_history( - ns.session, - slices=ns.slices, - start_time=ns.start_time, - end_time=ns.end_time, - datetime_format=ns.datetime_format, - ) - except Exception as err: - print("history: error: {}".format(err), file=stderr) - return - if ns.reverse: - commands = reversed(list(commands)) - end = "\0" if ns.null_byte else "\n" - if ns.numerate and ns.timestamp: - for c in commands: - dt = datetime.datetime.fromtimestamp(c["ts"]) - print( - "{}:({}) {}".format(c["ind"], xt.format_datetime(dt), c["inp"]), - file=stdout, - end=end, - ) - elif ns.numerate: - for c in commands: - print("{}: {}".format(c["ind"], c["inp"]), file=stdout, end=end) - elif ns.timestamp: - for c in commands: - dt = datetime.datetime.fromtimestamp(c["ts"]) - print( - "({}) {}".format(xt.format_datetime(dt), c["inp"]), file=stdout, end=end - ) - else: - for c in commands: - print(c["inp"], file=stdout, end=end) - - -@xla.lazyobject -def _XH_HISTORY_SESSIONS(): - return { - "session": _xh_session_parser, - "xonsh": _xh_all_parser, - "all": _xh_all_parser, - "zsh": _xh_zsh_hist_parser, - "bash": _xh_bash_hist_parser, - } - - -_XH_MAIN_ACTIONS = {"show", "id", "file", "info", "diff", "gc"} - - -@functools.lru_cache() -def _xh_create_parser(): - """Create a parser for the "history" command.""" - p = argparse.ArgumentParser( - prog="history", description="try 'history --help' " "for more info" - ) - subp = p.add_subparsers(title="commands", dest="action") - # session action - show = subp.add_parser( - "show", prefix_chars="-+", help="display history of a session, default command" - ) - show.add_argument( - "-r", - dest="reverse", - default=False, - action="store_true", - help="reverses the direction", - ) - show.add_argument( - "-n", - dest="numerate", - default=False, - action="store_true", - help="numerate each command", - ) - show.add_argument( - "-t", - dest="timestamp", - default=False, - action="store_true", - help="show command timestamps", - ) - show.add_argument( - "-T", dest="end_time", default=None, help="show only commands before timestamp" - ) - show.add_argument( - "+T", dest="start_time", default=None, help="show only commands after timestamp" - ) - show.add_argument( - "-f", - dest="datetime_format", - default=None, - help="the datetime format to be used for" "filtering and printing", - ) - show.add_argument( - "-0", - dest="null_byte", - default=False, - action="store_true", - help="separate commands by the null character for piping " - "history to external filters", - ) - show.add_argument( - "session", - nargs="?", - choices=_XH_HISTORY_SESSIONS.keys(), - default="session", - metavar="session", - help="{} (default: current session, all is an alias for xonsh)" - "".format(", ".join(map(repr, _XH_HISTORY_SESSIONS.keys()))), - ) - show.add_argument( - "slices", - nargs="*", - default=None, - metavar="slice", - help="integer or slice notation", - ) - # 'id' subcommand - subp.add_parser("id", help="display the current session id") - # 'file' subcommand - subp.add_parser("file", help="display the current history filename") - # 'info' subcommand - info = subp.add_parser( - "info", help=("display information about the " "current history") - ) - info.add_argument( - "--json", - dest="json", - default=False, - action="store_true", - help="print in JSON format", - ) - - # gc - gcp = subp.add_parser("gc", help="launches a new history garbage collector") - gcp.add_argument( - "--size", - nargs=2, - dest="size", - default=None, - help=( - "next two arguments represent the history size and " - 
'units; e.g. "--size 8128 commands"' - ), - ) - bgcp = gcp.add_mutually_exclusive_group() - bgcp.add_argument( - "--blocking", - dest="blocking", - default=True, - action="store_true", - help=("ensures that the gc blocks the main thread, " "default True"), - ) - bgcp.add_argument( - "--non-blocking", - dest="blocking", - action="store_false", - help="makes the gc non-blocking, and thus return sooner", - ) - - hist = builtins.__xonsh__.history - if isinstance(hist, JsonHistory): - # add actions belong only to JsonHistory - diff = subp.add_parser("diff", help="diff two xonsh history files") - xdh.dh_create_parser(p=diff) - - import xonsh.replay as xrp - - replay = subp.add_parser("replay", help="replay a xonsh history file") - xrp.replay_create_parser(p=replay) - _XH_MAIN_ACTIONS.add("replay") - - return p - - -def _xh_parse_args(args): - """Prepare and parse arguments for the history command. - - Add default action for ``history`` and - default session for ``history show``. - """ - parser = _xh_create_parser() - if not args: - args = ["show", "session"] - elif args[0] not in _XH_MAIN_ACTIONS and args[0] not in ("-h", "--help"): - args = ["show", "session"] + args - if args[0] == "show": - if not any(a in _XH_HISTORY_SESSIONS for a in args): - args.insert(1, "session") - ns, slices = parser.parse_known_args(args) - if slices: - if not ns.slices: - ns.slices = slices - else: - ns.slices.extend(slices) - else: - ns = parser.parse_args(args) - return ns - - -def history_main( - args=None, stdin=None, stdout=None, stderr=None, spec=None, stack=None -): - """This is the history command entry point.""" - hist = builtins.__xonsh__.history - ns = _xh_parse_args(args) - if not ns or not ns.action: - return - if ns.action == "show": - _xh_show_history(hist, ns, stdout=stdout, stderr=stderr) - elif ns.action == "info": - data = hist.info() - if ns.json: - s = json.dumps(data) - print(s, file=stdout) - else: - lines = ["{0}: {1}".format(k, v) for k, v in data.items()] - print("\n".join(lines), file=stdout) - elif ns.action == "id": - if not hist.sessionid: - return - print(str(hist.sessionid), file=stdout) - elif ns.action == "file": - if not hist.filename: - return - print(str(hist.filename), file=stdout) - elif ns.action == "gc": - hist.run_gc(size=ns.size, blocking=ns.blocking) - elif ns.action == "diff": - if isinstance(hist, JsonHistory): - xdh.dh_main_action(ns) - elif ns.action == "replay": - if isinstance(hist, JsonHistory): - import xonsh.replay as xrp - - xrp.replay_main_action(hist, ns, stdout=stdout, stderr=stderr) - else: - print("Unknown history action {}".format(ns.action), file=sys.stderr) diff --git a/xonsh/history/sqlite.py b/xonsh/history/sqlite.py deleted file mode 100644 index 430e32a..0000000 --- a/xonsh/history/sqlite.py +++ /dev/null @@ -1,240 +0,0 @@ -# -*- coding: utf-8 -*- -"""Implements the xonsh history backend via sqlite3.""" -import builtins -import collections -import json -import os -import sqlite3 -import sys -import threading -import time - -from xonsh.history.base import History -import xonsh.tools as xt - - -def _xh_sqlite_get_file_name(): - envs = builtins.__xonsh__.env - file_name = envs.get("XONSH_HISTORY_SQLITE_FILE") - if not file_name: - data_dir = envs.get("XONSH_DATA_DIR") - file_name = os.path.join(data_dir, "xonsh-history.sqlite") - return xt.expanduser_abs_path(file_name) - - -def _xh_sqlite_get_conn(filename=None): - if filename is None: - filename = _xh_sqlite_get_file_name() - return sqlite3.connect(filename) - - -def 
_xh_sqlite_create_history_table(cursor): - """Create Table for history items. - - Columns: - info - JSON formatted, reserved for future extension. - """ - cursor.execute( - """ - CREATE TABLE IF NOT EXISTS xonsh_history - (inp TEXT, - rtn INTEGER, - tsb REAL, - tse REAL, - sessionid TEXT, - out TEXT, - info TEXT - ) - """ - ) - - -def _xh_sqlite_insert_command(cursor, cmd, sessionid, store_stdout): - sql = "INSERT INTO xonsh_history (inp, rtn, tsb, tse, sessionid" - tss = cmd.get("ts", [None, None]) - params = [cmd["inp"].rstrip(), cmd["rtn"], tss[0], tss[1], sessionid] - if store_stdout and "out" in cmd: - sql += ", out" - params.append(cmd["out"]) - if "info" in cmd: - sql += ", info" - info = json.dumps(cmd["info"]) - params.append(info) - sql += ") VALUES (" + ("?, " * len(params)).rstrip(", ") + ")" - cursor.execute(sql, tuple(params)) - - -def _xh_sqlite_get_count(cursor, sessionid=None): - sql = "SELECT count(*) FROM xonsh_history " - params = [] - if sessionid is not None: - sql += "WHERE sessionid = ? " - params.append(str(sessionid)) - cursor.execute(sql, tuple(params)) - return cursor.fetchone()[0] - - -def _xh_sqlite_get_records(cursor, sessionid=None, limit=None, newest_first=False): - sql = "SELECT inp, tsb, rtn FROM xonsh_history " - params = [] - if sessionid is not None: - sql += "WHERE sessionid = ? " - params.append(sessionid) - sql += "ORDER BY tsb " - if newest_first: - sql += "DESC " - if limit is not None: - sql += "LIMIT %d " % limit - cursor.execute(sql, tuple(params)) - return cursor.fetchall() - - -def _xh_sqlite_delete_records(cursor, size_to_keep): - sql = "SELECT min(tsb) FROM (" - sql += "SELECT tsb FROM xonsh_history ORDER BY tsb DESC " - sql += "LIMIT %d)" % size_to_keep - cursor.execute(sql) - result = cursor.fetchone() - if not result: - return - max_tsb = result[0] - sql = "DELETE FROM xonsh_history WHERE tsb < ?" - result = cursor.execute(sql, (max_tsb,)) - return result.rowcount - - -def xh_sqlite_append_history(cmd, sessionid, store_stdout, filename=None): - with _xh_sqlite_get_conn(filename=filename) as conn: - c = conn.cursor() - _xh_sqlite_create_history_table(c) - _xh_sqlite_insert_command(c, cmd, sessionid, store_stdout) - conn.commit() - - -def xh_sqlite_get_count(sessionid=None, filename=None): - with _xh_sqlite_get_conn(filename=filename) as conn: - c = conn.cursor() - return _xh_sqlite_get_count(c, sessionid=sessionid) - - -def xh_sqlite_items(sessionid=None, filename=None, newest_first=False): - with _xh_sqlite_get_conn(filename=filename) as conn: - c = conn.cursor() - _xh_sqlite_create_history_table(c) - return _xh_sqlite_get_records(c, sessionid=sessionid, newest_first=newest_first) - - -def xh_sqlite_delete_items(size_to_keep, filename=None): - with _xh_sqlite_get_conn(filename=filename) as conn: - c = conn.cursor() - _xh_sqlite_create_history_table(c) - return _xh_sqlite_delete_records(c, size_to_keep) - - -class SqliteHistoryGC(threading.Thread): - """Shell history garbage collection.""" - - def __init__(self, wait_for_shell=True, size=None, filename=None, *args, **kwargs): - """Thread responsible for garbage collecting old history. - - May wait for shell (and for xonshrc to have been loaded) to start work. 
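
For orientation, the schema and trim queries above reduce to two small sqlite3 operations. A minimal standalone sketch; the function names and the reduced column set are illustrative, not the exact xonsh API:

    import sqlite3

    def append_command(filename, inp, rtn, tsb, tse, sessionid):
        # Same shape as _xh_sqlite_insert_command above: parameterized
        # placeholders keep shell input out of the SQL text itself.
        with sqlite3.connect(filename) as conn:
            conn.execute(
                "CREATE TABLE IF NOT EXISTS xonsh_history "
                "(inp TEXT, rtn INTEGER, tsb REAL, tse REAL, sessionid TEXT)"
            )
            conn.execute(
                "INSERT INTO xonsh_history VALUES (?, ?, ?, ?, ?)",
                (inp.rstrip(), rtn, tsb, tse, sessionid),
            )

    def trim_history(filename, size_to_keep):
        # Same shape as _xh_sqlite_delete_records above: keep the newest
        # size_to_keep rows by start timestamp (tsb), drop everything older.
        with sqlite3.connect(filename) as conn:
            conn.execute(
                "DELETE FROM xonsh_history WHERE tsb < (SELECT min(tsb) FROM "
                "(SELECT tsb FROM xonsh_history ORDER BY tsb DESC LIMIT ?))",
                (size_to_keep,),
            )
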
- """ - super().__init__(*args, **kwargs) - self.daemon = True - self.filename = filename - self.size = size - self.wait_for_shell = wait_for_shell - self.start() - - def run(self): - while self.wait_for_shell: - time.sleep(0.01) - if self.size is not None: - hsize, units = xt.to_history_tuple(self.size) - else: - envs = builtins.__xonsh__.env - hsize, units = envs.get("XONSH_HISTORY_SIZE") - if units != "commands": - print( - "sqlite backed history gc currently only supports " - '"commands" as units', - file=sys.stderr, - ) - return - if hsize < 0: - return - xh_sqlite_delete_items(hsize, filename=self.filename) - - -class SqliteHistory(History): - """Xonsh history backend implemented with sqlite3.""" - - def __init__(self, gc=True, filename=None, **kwargs): - super().__init__(**kwargs) - if filename is None: - filename = _xh_sqlite_get_file_name() - self.filename = filename - self.gc = SqliteHistoryGC() if gc else None - self._last_hist_inp = None - self.inps = [] - self.rtns = [] - self.outs = [] - self.tss = [] - - def append(self, cmd): - envs = builtins.__xonsh__.env - opts = envs.get("HISTCONTROL") - inp = cmd["inp"].rstrip() - self.inps.append(inp) - store_stdout = envs.get("XONSH_STORE_STDOUT", False) - if store_stdout: - self.outs.append(cmd.get("out")) - else: - self.outs.append(None) - self.rtns.append(cmd["rtn"]) - self.tss.append(cmd.get("ts", (None, None))) - - opts = envs.get("HISTCONTROL") - if "ignoredups" in opts and inp == self._last_hist_inp: - # Skipping dup cmd - return - if "ignoreerr" in opts and cmd["rtn"] != 0: - # Skipping failed cmd - return - self._last_hist_inp = inp - xh_sqlite_append_history( - cmd, str(self.sessionid), store_stdout, filename=self.filename - ) - - def all_items(self, newest_first=False): - """Display all history items.""" - for item in xh_sqlite_items(filename=self.filename, newest_first=newest_first): - yield {"inp": item[0], "ts": item[1], "rtn": item[2]} - - def items(self, newest_first=False): - """Display history items of current session.""" - for item in xh_sqlite_items( - sessionid=str(self.sessionid), - filename=self.filename, - newest_first=newest_first, - ): - yield {"inp": item[0], "ts": item[1], "rtn": item[2]} - - def info(self): - data = collections.OrderedDict() - data["backend"] = "sqlite" - data["sessionid"] = str(self.sessionid) - data["filename"] = self.filename - data["session items"] = xh_sqlite_get_count( - sessionid=self.sessionid, filename=self.filename - ) - data["all items"] = xh_sqlite_get_count(filename=self.filename) - envs = builtins.__xonsh__.env - data["gc options"] = envs.get("XONSH_HISTORY_SIZE") - return data - - def run_gc(self, size=None, blocking=True): - self.gc = SqliteHistoryGC(wait_for_shell=False, size=size) - if blocking: - while self.gc.is_alive(): - continue diff --git a/xonsh/imphooks.py b/xonsh/imphooks.py index a0ff63b..ddb1f0d 100644 --- a/xonsh/imphooks.py +++ b/xonsh/imphooks.py @@ -1,46 +1,14 @@ -# -*- coding: utf-8 -*- -"""Import hooks for importing xonsh source files. - -This module registers the hooks it defines when it is imported. +"""Import hooks for importing xonsh source files. This module registers +the hooks it defines when it is imported. 
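
The imphooks hunk that follows deletes, among other things, a find_source_encoding helper built on the PEP 263 coding-cookie regex. For reference, a self-contained sketch of that check; note the escaped whitespace classes [ \t\v] and [ \t] in the pattern, and UTF-8 as the documented fallback:

    import re

    # PEP 263: the cookie must sit on line 1 or 2, e.g. "# -*- coding: utf-8 -*-"
    ENCODING_LINE = re.compile(b"^[ \t\v]*#.*?coding[:=][ \t]*([-_.a-zA-Z0-9]+)")

    def find_source_encoding(src: bytes) -> str:
        """Return the declared source encoding, or UTF-8 if none is declared."""
        for line in src.split(b"\n", 2)[:2]:  # only the first two lines count
            m = ENCODING_LINE.match(line)
            if m is not None:
                return m.group(1).decode("UTF-8")
        return "UTF-8"
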
""" import os -import re import sys -import types import builtins -import contextlib -import importlib from importlib.machinery import ModuleSpec -from importlib.abc import MetaPathFinder, SourceLoader, Loader +from importlib.abc import MetaPathFinder, SourceLoader -from xonsh.events import events +from xonsh.tools import string_types from xonsh.execer import Execer -from xonsh.platform import scandir -from xonsh.lazyasd import lazyobject - - -@lazyobject -def ENCODING_LINE(): - # this regex comes from PEP 263 - # https://www.python.org/dev/peps/pep-0263/#defining-the-encoding - return re.compile(b"^[ tv]*#.*?coding[:=][ t]*([-_.a-zA-Z0-9]+)") - - -def find_source_encoding(src): - """Finds the source encoding given bytes representing a file. If - no encoding is found, UTF-8 will be returned as per the docs - https://docs.python.org/3/howto/unicode.html#unicode-literals-in-python-source-code - """ - utf8 = "UTF-8" - first, _, rest = src.partition(b"\n") - m = ENCODING_LINE.match(first) - if m is not None: - return m.group(1).decode(utf8) - second, _, _ = rest.partition(b"\n") - m = ENCODING_LINE.match(second) - if m is not None: - return m.group(1).decode(utf8) - return utf8 class XonshImportHook(MetaPathFinder, SourceLoader): @@ -53,12 +21,8 @@ def __init__(self, *args, **kwargs): @property def execer(self): - if ( - hasattr(builtins, "__xonsh__") - and hasattr(builtins.__xonsh__, "execer") - and builtins.__xonsh__.execer is not None - ): - execer = builtins.__xonsh__.execer + if hasattr(builtins, '__xonsh_execer__'): + execer = builtins.__xonsh_execer__ if self._execer is not None: self._execer = None elif self._execer is None: @@ -72,19 +36,19 @@ def execer(self): # def find_spec(self, fullname, path, target=None): """Finds the spec for a xonsh module if it exists.""" - dot = "." + dot = '.' 
spec = None path = sys.path if path is None else path if dot not in fullname and dot not in path: path = [dot] + path name = fullname.rsplit(dot, 1)[-1] - fname = name + ".xsh" + fname = name + '.xsh' for p in path: - if not isinstance(p, str): + if not isinstance(p, string_types): continue - if not os.path.isdir(p) or not os.access(p, os.R_OK): + if not os.path.isdir(p): continue - if fname not in {x.name for x in scandir(p)}: + if fname not in os.listdir(p): continue spec = ModuleSpec(fullname, self) self._filenames[fullname] = os.path.join(p, fname) @@ -94,14 +58,6 @@ def find_spec(self, fullname, path, target=None): # # SourceLoader methods # - def create_module(self, spec): - """Create a xonsh module with the appropriate attributes.""" - mod = types.ModuleType(spec.name) - mod.__file__ = self.get_filename(spec.name) - mod.__loader__ = self - mod.__package__ = spec.parent or "" - return mod - def get_filename(self, fullname): """Returns the filename for a module's fullname.""" return self._filenames[fullname] @@ -112,15 +68,13 @@ def get_data(self, path): def get_code(self, fullname): """Gets the code object for a xonsh file.""" - filename = self.get_filename(fullname) + filename = self._filenames.get(fullname, None) if filename is None: msg = "xonsh file {0!r} could not be found".format(fullname) raise ImportError(msg) - with open(filename, "rb") as f: + with open(filename, 'r') as f: src = f.read() - enc = find_source_encoding(src) - src = src.decode(encoding=enc) - src = src if src.endswith("\n") else src + "\n" + src = src if src.endswith('\n') else src + '\n' execer = self.execer execer.filename = filename ctx = {} # dummy for modules @@ -128,192 +82,4 @@ def get_code(self, fullname): return code -# -# Import events -# -events.doc( - "on_import_pre_find_spec", - """ -on_import_pre_find_spec(fullname: str, path: str, target: module or None) -> None - -Fires before any import find_spec() calls have been executed. The parameters -here are the same as importlib.abc.MetaPathFinder.find_spec(). Namely, - -:``fullname``: The full name of the module to import. -:``path``: None if a top-level import, otherwise the ``__path__`` of the parent - package. -:``target``: Target module used to make a better guess about the package spec. -""", -) - -events.doc( - "on_import_post_find_spec", - """ -on_import_post_find_spec(spec, fullname, path, target) -> None - -Fires after all import find_spec() calls have been executed. The parameters -here the spec and the arguments importlib.abc.MetaPathFinder.find_spec(). Namely, - -:``spec``: A ModuleSpec object if the spec was found, or None if it was not. -:``fullname``: The full name of the module to import. -:``path``: None if a top-level import, otherwise the ``__path__`` of the parent - package. -:``target``: Target module used to make a better guess about the package spec. -""", -) - -events.doc( - "on_import_pre_create_module", - """ -on_import_pre_create_module(spec: ModuleSpec) -> None - -Fires right before a module is created by its loader. The only parameter -is the spec object. See importlib for more details. -""", -) - -events.doc( - "on_import_post_create_module", - """ -on_import_post_create_module(module: Module, spec: ModuleSpec) -> None - -Fires after a module is created by its loader but before the loader returns it. -The parameters here are the module object itself and the spec object. -See importlib for more details. 
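
Stripped of xonsh specifics, the finder above follows the standard sys.meta_path recipe: locate a matching file, return a ModuleSpec whose loader remembers the path, and compile at exec time. A minimal sketch; SuffixFinder is a hypothetical name, and plain compile() stands in for the xonsh Execer (the real SourceLoader machinery also handles bytecode caching):

    import os
    import sys
    from importlib.abc import Loader, MetaPathFinder
    from importlib.machinery import ModuleSpec

    class SuffixFinder(MetaPathFinder, Loader):
        """Make ``import foo`` fall back to a foo.xsh file on the path."""

        def __init__(self, suffix=".xsh"):
            self.suffix = suffix
            self._files = {}

        def find_spec(self, fullname, path, target=None):
            fname = fullname.rsplit(".", 1)[-1] + self.suffix
            for p in (path if path is not None else sys.path):
                if not isinstance(p, str):
                    continue
                candidate = os.path.join(p or ".", fname)
                if os.path.isfile(candidate):
                    self._files[fullname] = candidate
                    return ModuleSpec(fullname, self, origin=candidate)
            return None

        def exec_module(self, module):
            filename = self._files[module.__spec__.name]
            with open(filename, "rb") as f:
                src = f.read()  # bytes: compile() honors any coding cookie
            exec(compile(src, filename, "exec"), module.__dict__)

    # sys.meta_path.append(SuffixFinder())  # opt in explicitly
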
-""", -) - -events.doc( - "on_import_pre_exec_module", - """ -on_import_pre_exec_module(module: Module) -> None - -Fires right before a module is executed by its loader. The only parameter -is the module itself. See importlib for more details. -""", -) - -events.doc( - "on_import_post_exec_module", - """ -on_import_post_create_module(module: Module) -> None - -Fires after a module is executed by its loader but before the loader returns it. -The only parameter is the module itself. See importlib for more details. -""", -) - - -def _should_dispatch_xonsh_import_event_loader(): - """Figures out if we should dispatch to a load event""" - return ( - len(events.on_import_pre_create_module) > 0 - or len(events.on_import_post_create_module) > 0 - or len(events.on_import_pre_exec_module) > 0 - or len(events.on_import_post_exec_module) > 0 - ) - - -class XonshImportEventHook(MetaPathFinder): - """Implements the import hook for firing xonsh events on import.""" - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self._fullname_stack = [] - - @contextlib.contextmanager - def append_stack(self, fullname): - """A context manager for appending and then removing a name from the - fullname stack. - """ - self._fullname_stack.append(fullname) - yield - del self._fullname_stack[-1] - - # - # MetaPathFinder methods - # - def find_spec(self, fullname, path, target=None): - """Finds the spec for a xonsh module if it exists.""" - if fullname in reversed(self._fullname_stack): - # don't execute if we are already in the stack. - return None - npre = len(events.on_import_pre_find_spec) - npost = len(events.on_import_post_find_spec) - dispatch_load = _should_dispatch_xonsh_import_event_loader() - if npre > 0: - events.on_import_pre_find_spec.fire( - fullname=fullname, path=path, target=target - ) - elif npost == 0 and not dispatch_load: - # no events to fire, proceed normally and prevent recursion - return None - # now find the spec - with self.append_stack(fullname): - spec = importlib.util.find_spec(fullname) - # fire post event - if npost > 0: - events.on_import_post_find_spec.fire( - spec=spec, fullname=fullname, path=path, target=target - ) - if dispatch_load and spec is not None and hasattr(spec.loader, "create_module"): - spec.loader = XonshImportEventLoader(spec.loader) - return spec - - -class XonshImportEventLoader(Loader): - """A class that dispatches loader calls to another loader and fires relevant - xonsh events. - """ - - def __init__(self, loader): - self.loader = loader - - # - # Loader methods - # - def create_module(self, spec): - """Creates and returns the module object.""" - events.on_import_pre_create_module.fire(spec=spec) - mod = self.loader.create_module(spec) - events.on_import_post_create_module.fire(module=mod, spec=spec) - return mod - - def exec_module(self, module): - """Executes the module in its own namespace.""" - events.on_import_pre_exec_module.fire(module=module) - rtn = self.loader.exec_module(module) - events.on_import_post_exec_module.fire(module=module) - return rtn - - def load_module(self, fullname): - """Legacy module loading, provided for backwards compatibility.""" - return self.loader.load_module(fullname) - - def module_repr(self, module): - """Legacy module repr, provided for backwards compatibility.""" - return self.loader.module_repr(module) - - -def install_import_hooks(): - """ - Install Xonsh import hooks in ``sys.meta_path`` in order for ``.xsh`` files - to be importable and import events to be fired. 
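
The event-firing loader these docstrings describe is plain delegation: wrap the real loader and call the hooks around create/exec. A compact sketch, with a generic fire(name, **kwargs) callback standing in for the xonsh events registry:

    from importlib.abc import Loader

    class EventLoader(Loader):
        """Dispatch to another loader, firing hooks before and after."""

        def __init__(self, loader, fire):
            self.loader = loader
            self.fire = fire

        def create_module(self, spec):
            self.fire("on_import_pre_create_module", spec=spec)
            mod = self.loader.create_module(spec)  # None means "use default"
            self.fire("on_import_post_create_module", module=mod, spec=spec)
            return mod

        def exec_module(self, module):
            self.fire("on_import_pre_exec_module", module=module)
            self.loader.exec_module(module)
            self.fire("on_import_post_exec_module", module=module)
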
- - Can safely be called many times, will be no-op if xonsh import hooks are - already present. - """ - found_imp = found_event = False - for hook in sys.meta_path: - if isinstance(hook, XonshImportHook): - found_imp = True - elif isinstance(hook, XonshImportEventHook): - found_event = True - if not found_imp: - sys.meta_path.append(XonshImportHook()) - if not found_event: - sys.meta_path.insert(0, XonshImportEventHook()) - - -# alias to deprecated name -install_hook = install_import_hooks +sys.meta_path.append(XonshImportHook()) diff --git a/xonsh/inspectors.py b/xonsh/inspectors.py index f14f8fc..c8caeff 100644 --- a/xonsh/inspectors.py +++ b/xonsh/inspectors.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """Tools for inspecting Python objects. This file was forked from the IPython project: @@ -9,77 +8,48 @@ * Copyright (c) 2001, Nathaniel Gray """ import os -import io import sys import types import inspect -import itertools import linecache -import collections +import io as stdlib_io -from xonsh.lazyasd import LazyObject -from xonsh.tokenize import detect_encoding -from xonsh.openpy import read_py_file -from xonsh.tools import cast_unicode, safe_hasattr, indent, print_color, format_color -from xonsh.platform import HAS_PYGMENTS, PYTHON_VERSION_INFO -from xonsh.lazyimps import pygments, pyghooks -from xonsh.style_tools import partial_color_tokenize +from xonsh import openpy +from xonsh.tools import cast_unicode, safe_hasattr, string_types, indent + +if sys.version_info[0] > 2: + ISPY3K = True + from itertools import zip_longest +else: + ISPY3K = False + from itertools import izip_longest as zip_longest # builtin docstrings to ignore -_func_call_docstring = LazyObject( - lambda: types.FunctionType.__call__.__doc__, globals(), "_func_call_docstring" -) -_object_init_docstring = LazyObject( - lambda: object.__init__.__doc__, globals(), "_object_init_docstring" -) -_builtin_type_docstrings = LazyObject( - lambda: { - t.__doc__ for t in (types.ModuleType, types.MethodType, types.FunctionType) - }, - globals(), - "_builtin_type_docstrings", -) - -_builtin_func_type = LazyObject(lambda: type(all), globals(), "_builtin_func_type") -# Bound methods have the same type as builtin functions -_builtin_meth_type = LazyObject( - lambda: type(str.upper), globals(), "_builtin_meth_type" -) - -info_fields = LazyObject( - lambda: [ - "type_name", - "base_class", - "string_form", - "namespace", - "length", - "file", - "definition", - "docstring", - "source", - "init_definition", - "class_docstring", - "init_docstring", - "call_def", - "call_docstring", - # These won't be printed but will be used to determine how to - # format the object - "ismagic", - "isalias", - "isclass", - "argspec", - "found", - "name", - ], - globals(), - "info_fields", -) +_func_call_docstring = types.FunctionType.__call__.__doc__ +_object_init_docstring = object.__init__.__doc__ +_builtin_type_docstrings = { + t.__doc__ + for t in (types.ModuleType, types.MethodType, types.FunctionType) +} + +_builtin_func_type = type(all) +_builtin_meth_type = type( + str.upper) # Bound methods have the same type as builtin functions + +info_fields = [ + 'type_name', 'base_class', 'string_form', 'namespace', 'length', 'file', + 'definition', 'docstring', 'source', 'init_definition', 'class_docstring', + 'init_docstring', 'call_def', 'call_docstring', + # These won't be printed but will be used to determine how to + # format the object + 'ismagic', 'isalias', 'isclass', 'argspec', 'found', 'name' +] def object_info(**kw): """Make an object info dict 
with all fields present.""" - infodict = dict(itertools.zip_longest(info_fields, [None])) + infodict = dict(zip_longest(info_fields, [None])) infodict.update(kw) return infodict @@ -95,7 +65,7 @@ def get_encoding(obj): # filesystem. if ofile is None: return None - elif ofile.endswith((".so", ".dll", ".pyd")): + elif ofile.endswith(('.so', '.dll', '.pyd')): return None elif not os.path.isfile(ofile): return None @@ -103,8 +73,8 @@ def get_encoding(obj): # Print only text files, not extension binaries. Note that # getsourcelines returns lineno with 1-offset and page() uses # 0-offset, so we must adjust. - with io.open(ofile, "rb") as buf: # Tweaked to use io.open for Python 2 - encoding, _ = detect_encoding(buf.readline) + with stdlib_io.open(ofile, 'rb') as buf: # Tweaked to use io.open for Python 2 + encoding, _ = openpy.detect_encoding(buf.readline) return encoding @@ -123,7 +93,7 @@ def getdoc(obj): pass else: # if we get extra info, we add it to the normal docstring. - if isinstance(ds, str): + if isinstance(ds, string_types): return inspect.cleandoc(ds) try: @@ -161,7 +131,7 @@ def getsource(obj, is_binary=False): try: src = inspect.getsource(obj) except TypeError: - if hasattr(obj, "__class__"): + if hasattr(obj, '__class__'): src = inspect.getsource(obj.__class__) encoding = get_encoding(obj) return cast_unicode(src, encoding=encoding) @@ -169,12 +139,10 @@ def getsource(obj, is_binary=False): def is_simple_callable(obj): """True if obj is a function ()""" - return ( - inspect.isfunction(obj) - or inspect.ismethod(obj) - or isinstance(obj, _builtin_func_type) - or isinstance(obj, _builtin_meth_type) - ) + return (inspect.isfunction(obj) or + inspect.ismethod(obj) or + isinstance(obj, _builtin_func_type) or + isinstance(obj, _builtin_meth_type)) def getargspec(obj): @@ -184,10 +152,10 @@ def getargspec(obj): In addition to functions and methods, this can also handle objects with a ``__call__`` attribute. """ - if safe_hasattr(obj, "__call__") and not is_simple_callable(obj): + if safe_hasattr(obj, '__call__') and not is_simple_callable(obj): obj = obj.__call__ - return inspect.getfullargspec(obj) + return inspect.getfullargspec(obj) if ISPY3K else inspect.getargspec(obj) def format_argspec(argspec): @@ -196,9 +164,8 @@ def format_argspec(argspec): This takes a dict instead of ordered arguments and calls inspect.format_argspec with the arguments in the necessary order. """ - return inspect.formatargspec( - argspec["args"], argspec["varargs"], argspec["varkw"], argspec["defaults"] - ) + return inspect.formatargspec(argspec['args'], argspec['varargs'], + argspec['varkw'], argspec['defaults']) def call_tip(oinfo, format_call=True): @@ -215,7 +182,7 @@ def call_tip(oinfo, format_call=True): Returns ------- call_info : None, str or (str, dict) tuple. - When format_call is True, the whole call information is formatted as a + When format_call is True, the whole call information is formattted as a single string. Otherwise, the object's name and its argspec dict are returned. If no call information is available, None is returned. @@ -226,7 +193,7 @@ def call_tip(oinfo, format_call=True): (regular functions). """ # Get call definition - argspec = oinfo.get("argspec") + argspec = oinfo.get('argspec') if argspec is None: call_line = None else: @@ -234,22 +201,22 @@ def call_tip(oinfo, format_call=True): # it out if it's there for clarity (since users do *not* pass an # extra first argument explicitly). 
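
For contrast with the getfullargspec/formatargspec plumbing above (formatargspec is deprecated in later Pythons), the same call tip can be derived from inspect.signature, which also omits ``self`` from bound methods automatically. A rough, hypothetical equivalent:

    import inspect

    def simple_call_tip(obj, name=""):
        """Return (call line, docstring), or None for opaque C callables."""
        try:
            sig = inspect.signature(obj)
        except (TypeError, ValueError):  # no introspectable signature
            return None
        return name + str(sig), inspect.getdoc(obj) or ""

    # usage: simple_call_tip(len, "len")
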
try: - has_self = argspec["args"][0] == "self" + has_self = argspec['args'][0] == 'self' except (KeyError, IndexError): pass else: if has_self: - argspec["args"] = argspec["args"][1:] + argspec['args'] = argspec['args'][1:] - call_line = oinfo["name"] + format_argspec(argspec) + call_line = oinfo['name'] + format_argspec(argspec) # Now get docstring. # The priority is: call docstring, constructor docstring, main one. - doc = oinfo.get("call_docstring") + doc = oinfo.get('call_docstring') if doc is None: - doc = oinfo.get("init_docstring") + doc = oinfo.get('init_docstring') if doc is None: - doc = oinfo.get("docstring", "") + doc = oinfo.get('docstring', '') return call_line, doc @@ -271,7 +238,7 @@ def find_file(obj): The absolute path to the file where the object was defined. """ # get source if obj was decorated with @decorator - if safe_hasattr(obj, "__wrapped__"): + if safe_hasattr(obj, '__wrapped__'): obj = obj.__wrapped__ fname = None @@ -280,7 +247,7 @@ def find_file(obj): except TypeError: # For an instance, the file that matters is where its class was # declared. - if hasattr(obj, "__class__"): + if hasattr(obj, '__class__'): try: fname = inspect.getabsfile(obj.__class__) except TypeError: @@ -308,7 +275,7 @@ def find_source_lines(obj): The line number where the object definition starts. """ # get source if obj was decorated with @decorator - if safe_hasattr(obj, "__wrapped__"): + if safe_hasattr(obj, '__wrapped__'): obj = obj.__wrapped__ try: @@ -316,7 +283,7 @@ def find_source_lines(obj): lineno = inspect.getsourcelines(obj)[1] except TypeError: # For instances, try the class object like getsource() does - if hasattr(obj, "__class__"): + if hasattr(obj, '__class__'): lineno = inspect.getsourcelines(obj.__class__)[1] else: lineno = None @@ -326,70 +293,57 @@ def find_source_lines(obj): return lineno -if PYTHON_VERSION_INFO < (3, 5, 0): - FrameInfo = collections.namedtuple( - "FrameInfo", - ["frame", "filename", "lineno", "function", "code_context", "index"], - ) - - def getouterframes(frame, context=1): - """Wrapper for getouterframes so that it acts like the Python v3.5 version.""" - return [FrameInfo(*f) for f in inspect.getouterframes(frame, context=context)] - - -else: - getouterframes = inspect.getouterframes - - class Inspector(object): """Inspects objects.""" def __init__(self, str_detail_level=0): self.str_detail_level = str_detail_level - def _getdef(self, obj, oname=""): + def _getdef(self, obj, oname=''): """Return the call signature for any callable object. If any exception is generated, None is returned instead and the exception is suppressed. """ try: - hdef = oname + inspect.signature(*getargspec(obj)) + hdef = oname + inspect.formatargspec(*getargspec(obj)) return cast_unicode(hdef) except: # pylint:disable=bare-except return None def noinfo(self, msg, oname): """Generic message when no information is found.""" - print("No %s found" % msg, end=" ") + print('No %s found' % msg, end=' ') if oname: - print("for %s" % oname) + print('for %s' % oname) else: print() - def pdef(self, obj, oname=""): + def pdef(self, obj, oname=''): """Print the call signature for any callable object. If the object is a class, print the constructor information. 
""" if not callable(obj): - print("Object is not callable.") + print('Object is not callable.') return - header = "" + header = '' if inspect.isclass(obj): - header = self.__head("Class constructor information:\n") + header = self.__head('Class constructor information:\n') obj = obj.__init__ + elif (not ISPY3K) and type(obj) is types.InstanceType: + obj = obj.__call__ output = self._getdef(obj, oname) if output is None: - self.noinfo("definition header", oname) + self.noinfo('definition header', oname) else: - print(header, output, end=" ", file=sys.stdout) + print(header, output, end=' ', file=sys.stdout) - def pdoc(self, obj, oname=""): + def pdoc(self, obj, oname=''): """Print the docstring for any object. Optional @@ -404,57 +358,57 @@ def pdoc(self, obj, oname=""): if ds: lines.append(head("Class docstring:")) lines.append(indent(ds)) - if inspect.isclass(obj) and hasattr(obj, "__init__"): + if inspect.isclass(obj) and hasattr(obj, '__init__'): init_ds = getdoc(obj.__init__) if init_ds is not None: lines.append(head("Init docstring:")) lines.append(indent(init_ds)) - elif hasattr(obj, "__call__"): + elif hasattr(obj, '__call__'): call_ds = getdoc(obj.__call__) if call_ds: lines.append(head("Call docstring:")) lines.append(indent(call_ds)) if not lines: - self.noinfo("documentation", oname) + self.noinfo('documentation', oname) else: - print("\n".join(lines)) + print('\n'.join(lines)) - def psource(self, obj, oname=""): + def psource(self, obj, oname=''): """Print the source code for an object.""" # Flush the source cache because inspect can return out-of-date source linecache.checkcache() try: src = getsource(obj) except: # pylint:disable=bare-except - self.noinfo("source", oname) + self.noinfo('source', oname) else: print(src) - def pfile(self, obj, oname=""): + def pfile(self, obj, oname=''): """Show the whole file where an object was defined.""" lineno = find_source_lines(obj) if lineno is None: - self.noinfo("file", oname) + self.noinfo('file', oname) return ofile = find_file(obj) # run contents of file through pager starting at line where the object # is defined, as long as the file isn't binary and is actually on the # filesystem. - if ofile.endswith((".so", ".dll", ".pyd")): - print("File %r is binary, not printing." % ofile) + if ofile.endswith(('.so', '.dll', '.pyd')): + print('File %r is binary, not printing.' % ofile) elif not os.path.isfile(ofile): - print("File %r does not exist, not printing." % ofile) + print('File %r does not exist, not printing.' % ofile) else: # Print only text files, not extension binaries. Note that # getsourcelines returns lineno with 1-offset and page() uses # 0-offset, so we must adjust. - o = read_py_file(ofile, skip_encoding_cookie=False) + o = openpy.read_py_file(ofile, skip_encoding_cookie=False) print(o, lineno - 1) - def _format_fields_str(self, fields, title_width=0): - """Formats a list of fields for display using color strings. + def _format_fields(self, fields, title_width=0): + """Formats a list of fields for display. 
Parameters ---------- @@ -467,81 +421,28 @@ def _format_fields_str(self, fields, title_width=0): if title_width == 0: title_width = max(len(title) + 2 for title, _ in fields) for title, content in fields: - title_len = len(title) - title = "{BOLD_RED}" + title + ":{NO_COLOR}" if len(content.splitlines()) > 1: - title += "\n" + title = title + ":\n" else: - title += " ".ljust(title_width - title_len) + title = (title + ":").ljust(title_width) out.append(cast_unicode(title) + cast_unicode(content)) - return format_color("\n".join(out) + "\n") - - def _format_fields_tokens(self, fields, title_width=0): - """Formats a list of fields for display using color tokens from - pygments. - - Parameters - ---------- - fields : list - A list of 2-tuples: (field_title, field_content) - title_width : int - How many characters to pad titles to. Default to longest title. - """ - out = [] - if title_width == 0: - title_width = max(len(title) + 2 for title, _ in fields) - for title, content in fields: - title_len = len(title) - title = "{BOLD_RED}" + title + ":{NO_COLOR}" - if not isinstance(content, str) or len(content.splitlines()) > 1: - title += "\n" - else: - title += " ".ljust(title_width - title_len) - out += partial_color_tokenize(title) - if isinstance(content, str): - out[-1] = (out[-1][0], out[-1][1] + content + "\n") - else: - out += content - out[-1] = (out[-1][0], out[-1][1] + "\n") - out[-1] = (out[-1][0], out[-1][1] + "\n") - return out - - def _format_fields(self, fields, title_width=0): - """Formats a list of fields for display using color tokens from - pygments. - - Parameters - ---------- - fields : list - A list of 2-tuples: (field_title, field_content) - title_width : int - How many characters to pad titles to. Default to longest title. - """ - if HAS_PYGMENTS: - rtn = self._format_fields_tokens(fields, title_width=title_width) - else: - rtn = self._format_fields_str(fields, title_width=title_width) - return rtn + return "\n".join(out) # The fields to be displayed by pinfo: (fancy_name, key_in_info_dict) pinfo_fields1 = [("Type", "type_name")] pinfo_fields2 = [("String form", "string_form")] - pinfo_fields3 = [ - ("Length", "length"), - ("File", "file"), - ("Definition", "definition"), - ] + pinfo_fields3 = [("Length", "length"), + ("File", "file"), + ("Definition", "definition"), ] - pinfo_fields_obj = [ - ("Class docstring", "class_docstring"), - ("Init docstring", "init_docstring"), - ("Call def", "call_def"), - ("Call docstring", "call_docstring"), - ] + pinfo_fields_obj = [("Class docstring", "class_docstring"), + ("Init docstring", "init_docstring"), + ("Call def", "call_def"), + ("Call docstring", "call_docstring"), ] - def pinfo(self, obj, oname="", info=None, detail_level=0): + def pinfo(self, obj, oname='', info=None, detail_level=0): """Show detailed information about an object. Parameters @@ -555,7 +456,10 @@ def pinfo(self, obj, oname="", info=None, detail_level=0): detail_level : int, optional if set to 1, more information is given. 
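
The alignment rule implemented by _format_fields above is small enough to state on its own: single-line values are padded to a shared column, multi-line values start a block of their own. A standalone sketch:

    def format_fields(fields, title_width=0):
        """fields is a list of (title, content) string pairs."""
        if title_width == 0:
            title_width = max(len(title) + 2 for title, _ in fields)
        out = []
        for title, content in fields:
            if len(content.splitlines()) > 1:
                out.append(title + ":\n" + content)            # block form
            else:
                out.append((title + ":").ljust(title_width) + content)
        return "\n".join(out)

    # format_fields([("Type", "function"), ("Docstring", "line one\nline two")])
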
""" - info = self.info(obj, oname=oname, info=info, detail_level=detail_level) + info = self.info(obj, + oname=oname, + info=info, + detail_level=detail_level) displayfields = [] def add_fields(fields): @@ -565,27 +469,38 @@ def add_fields(fields): displayfields.append((title, field.rstrip())) add_fields(self.pinfo_fields1) + + # Base class for old-style instances + if ((not ISPY3K) and + isinstance(obj, types.InstanceType) and + info['base_class']): + o = ("Base Class", info['base_class'].rstrip()) + displayfields.append(o) + add_fields(self.pinfo_fields2) # Namespace - if info["namespace"] is not None and info["namespace"] != "Interactive": - displayfields.append(("Namespace", info["namespace"].rstrip())) + if (info['namespace'] is not None and + info['namespace'] != 'Interactive'): + displayfields.append(("Namespace", info['namespace'].rstrip())) add_fields(self.pinfo_fields3) - if info["isclass"] and info["init_definition"]: - displayfields.append(("Init definition", info["init_definition"].rstrip())) + if info['isclass'] and info['init_definition']: + displayfields.append(("Init definition", + info['init_definition'].rstrip())) # Source or docstring, depending on detail level and whether # source found. - if detail_level > 0 and info["source"] is not None: - displayfields.append(("Source", cast_unicode(info["source"]))) - elif info["docstring"] is not None: + if detail_level > 0 and info['source'] is not None: + displayfields.append(("Source", cast_unicode(info['source']))) + elif info['docstring'] is not None: displayfields.append(("Docstring", info["docstring"])) # Constructor info for classes - if info["isclass"]: - if info["init_docstring"] is not None: - displayfields.append(("Init docstring", info["init_docstring"])) + if info['isclass']: + if info['init_docstring'] is not None: + displayfields.append(("Init docstring", + info['init_docstring'])) # Info for objects: else: @@ -593,9 +508,9 @@ def add_fields(fields): # Finally send to printer/pager: if displayfields: - print_color(self._format_fields(displayfields)) + print(self._format_fields(displayfields)) - def info(self, obj, oname="", info=None, detail_level=0): + def info(self, obj, oname='', info=None, detail_level=0): """Compute a dict with detailed information about an object. 
Optional arguments:  @@ -611,7 +526,7 @@ def info(self, obj, oname="", info=None, detail_level=0): if info is None: ismagic = 0 isalias = 0 - ospace = "" + ospace = '' else: ismagic = info.ismagic isalias = info.isalias @@ -619,7 +534,7 @@ # Get docstring, special-casing aliases: if isalias: if not callable(obj): - if len(obj) >= 2 and isinstance(obj[1], str): + if len(obj) >= 2 and isinstance(obj[1], string_types): ds = "Alias to the system command:\n {0}".format(obj[1]) else: # pylint:disable=bare-except ds = "Alias: " + str(obj) @@ -630,7 +545,7 @@ else: ds = getdoc(obj) if ds is None: - ds = "" + ds = '' # store output in a dict, we initialize it here and fill it as we go out = dict(name=oname, found=True, isalias=isalias, ismagic=ismagic) @@ -639,16 +554,16 @@ shalf = int((string_max - 5) / 2) if ismagic: - obj_type_name = "Magic function" + obj_type_name = 'Magic function' elif isalias: - obj_type_name = "System alias" + obj_type_name = 'System alias' else: obj_type_name = obj_type.__name__ - out["type_name"] = obj_type_name + out['type_name'] = obj_type_name try: bclass = obj.__class__ - out["base_class"] = str(bclass) + out['base_class'] = str(bclass) except: # pylint:disable=bare-except pass @@ -656,22 +571,21 @@ if detail_level >= self.str_detail_level: try: ostr = str(obj) - str_head = "string_form" + str_head = 'string_form' if not detail_level and len(ostr) > string_max: - ostr = ostr[:shalf] + " <...> " + ostr[-shalf:] - ostr = ("\n" + " " * len(str_head.expandtabs())).join( - q.strip() for q in ostr.split("\n") - ) + ostr = ostr[:shalf] + ' <...> ' + ostr[-shalf:] + ostr = ("\n" + " " * len(str_head.expandtabs())).\ join(q.strip() for q in ostr.split("\n")) out[str_head] = ostr except: # pylint:disable=bare-except pass if ospace: - out["namespace"] = ospace + out['namespace'] = ospace # Length (for strings and lists) try: - out["length"] = str(len(obj)) + out['length'] = str(len(obj)) except: # pylint:disable=bare-except pass @@ -683,16 +597,17 @@ # if the file was binary binary_file = True else: - if fname.endswith((".so", ".dll", ".pyd")): + if fname.endswith(('.so', '.dll', '.pyd')): binary_file = True - elif fname.endswith("<string>"): - fname = "Dynamically generated function. " "No source code available." - out["file"] = fname + elif fname.endswith('<string>'): + fname = ('Dynamically generated function. ' + 'No source code available.') + out['file'] = fname # Docstrings only in detail 0 mode, since source contains them (we # avoid repetitions). If source fails, we add them back, see below. 
if ds and detail_level == 0: - out["docstring"] = ds + out['docstring'] = ds # Original source code for any callable if detail_level: @@ -704,23 +619,19 @@ def info(self, obj, oname="", info=None, detail_level=0): try: source = getsource(obj, binary_file) except TypeError: - if hasattr(obj, "__class__"): + if hasattr(obj, '__class__'): source = getsource(obj.__class__, binary_file) if source is not None: - source = source.rstrip() - if HAS_PYGMENTS: - lexer = pyghooks.XonshLexer() - source = list(pygments.lex(source, lexer=lexer)) - out["source"] = source + out['source'] = source.rstrip() except Exception: # pylint:disable=broad-except pass if ds and source is None: - out["docstring"] = ds + out['docstring'] = ds # Constructor docstring for classes if inspect.isclass(obj): - out["isclass"] = True + out['isclass'] = True # reconstruct the function definition and print it: try: obj_init = obj.__init__ @@ -735,16 +646,16 @@ def info(self, obj, oname="", info=None, detail_level=0): if init_def or init_ds: if init_def: - out["init_definition"] = init_def + out['init_definition'] = init_def if init_ds: - out["init_docstring"] = init_ds + out['init_docstring'] = init_ds # and class docstring for instances: else: # reconstruct the function definition and print it: defln = self._getdef(obj, oname) if defln: - out["definition"] = defln + out['definition'] = defln # First, check whether the instance docstring is identical to the # class one, and print it separately if they don't coincide. In @@ -752,7 +663,7 @@ def info(self, obj, oname="", info=None, detail_level=0): # objects which use instance-customized docstrings. if ds: try: - cls = getattr(obj, "__class__") + cls = getattr(obj, '__class__') except: # pylint:disable=bare-except class_ds = None else: @@ -761,7 +672,7 @@ def info(self, obj, oname="", info=None, detail_level=0): if class_ds in _builtin_type_docstrings: class_ds = None if class_ds and ds != class_ds: - out["class_docstring"] = class_ds + out['class_docstring'] = class_ds # Next, try to show constructor docstrings try: @@ -772,23 +683,23 @@ def info(self, obj, oname="", info=None, detail_level=0): except AttributeError: init_ds = None if init_ds: - out["init_docstring"] = init_ds + out['init_docstring'] = init_ds # Call form docstring for callable instances - if safe_hasattr(obj, "__call__") and not is_simple_callable(obj): + if safe_hasattr(obj, '__call__') and not is_simple_callable(obj): call_def = self._getdef(obj.__call__, oname) if call_def: call_def = call_def # it may never be the case that call def and definition # differ, but don't include the same signature twice - if call_def != out.get("definition"): - out["call_def"] = call_def + if call_def != out.get('definition'): + out['call_def'] = call_def call_ds = getdoc(obj.__call__) # Skip Python's auto-generated docstrings if call_ds == _func_call_docstring: call_ds = None if call_ds: - out["call_docstring"] = call_ds + out['call_docstring'] = call_ds # Compute the object's argspec as a callable. The key is to decide # whether to pull it from the object itself, from its __init__ or @@ -811,10 +722,10 @@ def info(self, obj, oname="", info=None, detail_level=0): else: # named tuples' _asdict() method returns an OrderedDict, but we # we want a normal - out["argspec"] = argspec_dict = dict(argspec._asdict()) + out['argspec'] = argspec_dict = dict(argspec._asdict()) # We called this varkw before argspec became a named tuple. # With getfullargspec it's also called varkw. 
- if "varkw" not in argspec_dict: - argspec_dict["varkw"] = argspec_dict.pop("keywords") + if 'varkw' not in argspec_dict: + argspec_dict['varkw'] = argspec_dict.pop('keywords') return object_info(**out) diff --git a/xonsh/jobs.py b/xonsh/jobs.py index 24ac86f..d83f705 100644 --- a/xonsh/jobs.py +++ b/xonsh/jobs.py @@ -1,267 +1,167 @@ -# -*- coding: utf-8 -*- -"""Job control for the xonsh shell.""" +""" +Job control for the xonsh shell. +""" import os import sys import time -import ctypes import signal import builtins -import subprocess -import collections +from subprocess import TimeoutExpired -from xonsh.lazyasd import LazyObject -from xonsh.platform import FD_STDERR, ON_DARWIN, ON_WINDOWS, ON_CYGWIN, ON_MSYS, LIBC -from xonsh.tools import unthreadable +from xonsh.tools import ON_WINDOWS - -tasks = LazyObject(collections.deque, globals(), "tasks") -# Track time stamp of last exit command, so that two consecutive attempts to -# exit can kill all jobs and exit. -_last_exit_time = None - - -if ON_DARWIN: - - def _send_signal(job, signal): - # On OS X, os.killpg() may cause PermissionError when there are - # any zombie processes in the process group. - # See github issue #1012 for details - for pid in job["pids"]: - if pid is None: # the pid of an aliased proc is None - continue - try: - os.kill(pid, signal) - except ProcessLookupError: - pass - - -elif ON_WINDOWS: - pass -elif ON_CYGWIN or ON_MSYS: - # Similar to what happened on OSX, more issues on Cygwin - # (see Github issue #514). - def _send_signal(job, signal): - try: - os.killpg(job["pgrp"], signal) - except Exception: - for pid in job["pids"]: - try: - os.kill(pid, signal) - except Exception: - pass - - -else: - - def _send_signal(job, signal): - pgrp = job["pgrp"] - if pgrp is None: - for pid in job["pids"]: - try: - os.kill(pid, signal) - except Exception: - pass - else: - os.killpg(job["pgrp"], signal) +try: + _shell_tty = sys.stderr.fileno() +except OSError: + _shell_tty = None if ON_WINDOWS: + def _continue(obj): + return None + - def _continue(job): - job["status"] = "running" + def _kill(obj): + return obj.kill() - def _kill(job): - subprocess.check_output(["taskkill", "/F", "/T", "/PID", str(job["obj"].pid)]) def ignore_sigtstp(): pass - def give_terminal_to(pgid): + + def _set_pgrp(info): pass - def wait_for_active_job(last_task=None, backgrounded=False): + def wait_for_active_job(signal_to_send=None): """ Wait for the active job to finish, to be killed by SIGINT, or to be suspended by ctrl-z. 
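
The Windows wait loop this docstring introduces (its body continues below) is a poll-with-timeout pattern: wake every 10 ms so a KeyboardInterrupt is noticed promptly, then kill the child. Its skeleton, assuming any subprocess.Popen object:

    import subprocess

    def wait_interruptibly(proc):
        """Block until proc exits; Ctrl-C kills it instead of hanging."""
        while proc.returncode is None:
            try:
                proc.wait(0.01)              # raises TimeoutExpired on timeout
            except subprocess.TimeoutExpired:
                pass                         # not done yet; poll again
            except KeyboardInterrupt:
                proc.kill()                  # the next wait() reaps it
        return proc.returncode
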
""" _clear_dead_jobs() - active_task = get_next_task() - # Return when there are no foreground active task - if active_task is None: - return last_task - obj = active_task["obj"] - _continue(active_task) + act = builtins.__xonsh_active_job__ + if act is None: + return + job = builtins.__xonsh_all_jobs__[act] + obj = job['obj'] + if job['bg']: + return while obj.returncode is None: try: obj.wait(0.01) - except subprocess.TimeoutExpired: + except TimeoutExpired: pass except KeyboardInterrupt: - _kill(active_task) - return wait_for_active_job(last_task=active_task) - + obj.kill() + if obj.poll() is not None: + builtins.__xonsh_active_job__ = None else: + def _continue(obj): + return signal.SIGCONT - def _continue(job): - _send_signal(job, signal.SIGCONT) - def _kill(job): - _send_signal(job, signal.SIGKILL) + def _kill(obj): + os.kill(obj.pid, signal.SIGKILL) + def ignore_sigtstp(): signal.signal(signal.SIGTSTP, signal.SIG_IGN) - _shell_pgrp = os.getpgrp() - - _block_when_giving = LazyObject( - lambda: (signal.SIGTTOU, signal.SIGTTIN, signal.SIGTSTP, signal.SIGCHLD), - globals(), - "_block_when_giving", - ) - - if ON_CYGWIN or ON_MSYS: - # on cygwin, signal.pthread_sigmask does not exist in Python, even - # though pthread_sigmask is defined in the kernel. thus, we use - # ctypes to mimic the calls in the "normal" version below. - LIBC.pthread_sigmask.restype = ctypes.c_int - LIBC.pthread_sigmask.argtypes = [ - ctypes.c_int, - ctypes.POINTER(ctypes.c_ulong), - ctypes.POINTER(ctypes.c_ulong), - ] - - def _pthread_sigmask(how, signals): - mask = 0 - for sig in signals: - mask |= 1 << sig - oldmask = ctypes.c_ulong() - mask = ctypes.c_ulong(mask) - result = LIBC.pthread_sigmask( - how, ctypes.byref(mask), ctypes.byref(oldmask) - ) - if result: - raise OSError(result, "Sigmask error.") - - return { - sig - for sig in getattr(signal, "Signals", range(0, 65)) - if (oldmask.value >> sig) & 1 - } - else: - _pthread_sigmask = signal.pthread_sigmask - - # give_terminal_to is a simplified version of: - # give_terminal_to from bash 4.3 source, jobs.c, line 4030 - # this will give the terminal to the process group pgid - def give_terminal_to(pgid): - if pgid is None: - return False - oldmask = _pthread_sigmask(signal.SIG_BLOCK, _block_when_giving) + def _set_pgrp(info): try: - os.tcsetpgrp(FD_STDERR, pgid) - return True + info['pgrp'] = os.getpgid(info['obj'].pid) except ProcessLookupError: - # when the process finished before giving terminal to it, - # see issue #2288 - return False - except OSError as e: - if e.errno == 22: # [Errno 22] Invalid argument - # there are cases that all the processes of pgid have - # finished, then we don't need to do anything here, see - # issue #2220 - return False - elif e.errno == 25: # [Errno 25] Inappropriate ioctl for device - # There are also cases where we are not connected to a - # real TTY, even though we may be run in interactive - # mode. See issue #2267 for an example with emacs - return False - else: - raise - finally: - if oldmask: - # only reset the mask if it is non-empty! 
See #2989 - _pthread_sigmask(signal.SIG_SETMASK, oldmask) - - def wait_for_active_job(last_task=None, backgrounded=False, _nochild=False): + pass + + + _shell_pgrp = os.getpgrp() + + _block_when_giving = (signal.SIGTTOU, signal.SIGTTIN, signal.SIGTSTP) + + + def _give_terminal_to(pgid): + # over-simplified version of: + # give_terminal_to from bash 4.3 source, jobs.c, line 4030 + # this will give the terminal to the process group pgid + if _shell_tty is not None and os.isatty(_shell_tty): + oldmask = signal.pthread_sigmask(signal.SIG_BLOCK, _block_when_giving) + os.tcsetpgrp(_shell_tty, pgid) + signal.pthread_sigmask(signal.SIG_SETMASK, oldmask) + + + def wait_for_active_job(signal_to_send=None): """ Wait for the active job to finish, to be killed by SIGINT, or to be suspended by ctrl-z. """ _clear_dead_jobs() - active_task = get_next_task() - # Return when there are no foreground active task - if active_task is None: - return last_task - obj = active_task["obj"] - backgrounded = False - try: - _, wcode = os.waitpid(obj.pid, os.WUNTRACED) - except ChildProcessError: # No child processes - if _nochild: - return active_task - else: - return wait_for_active_job( - last_task=active_task, backgrounded=backgrounded, _nochild=True - ) - if os.WIFSTOPPED(wcode): - print("^Z") - active_task["status"] = "stopped" - backgrounded = True - elif os.WIFSIGNALED(wcode): + act = builtins.__xonsh_active_job__ + if act is None: + return + job = builtins.__xonsh_all_jobs__[act] + obj = job['obj'] + if job['bg']: + return + pgrp = job['pgrp'] + obj.done = False + + # give the terminal over to the fg process + _give_terminal_to(pgrp) + # if necessary, send the specified signal to this process + # (this hook was added because vim, emacs, etc, seem to need to have + # the terminal when they receive SIGCONT from the "fg" command) + if signal_to_send is not None: + os.kill(obj.pid, signal_to_send) + _, s = os.waitpid(obj.pid, os.WUNTRACED) + if os.WIFSTOPPED(s): + obj.done = True + job['bg'] = True + job['status'] = 'stopped' + print() # get a newline because ^Z will have been printed + print_one_job(act) + elif os.WIFSIGNALED(s): print() # get a newline because ^C will have been printed - obj.signal = (os.WTERMSIG(wcode), os.WCOREDUMP(wcode)) - obj.returncode = None - else: - obj.returncode = os.WEXITSTATUS(wcode) - obj.signal = None - return wait_for_active_job(last_task=active_task, backgrounded=backgrounded) - - -def get_next_task(): - """ Get the next active task and put it on top of the queue""" - selected_task = None - for tid in tasks: - task = get_task(tid) - if not task["bg"] and task["status"] == "running": - selected_task = tid - break - if selected_task is None: - return - tasks.remove(selected_task) - tasks.appendleft(selected_task) - return get_task(selected_task) - - -def get_task(tid): - return builtins.__xonsh__.all_jobs[tid] + if obj.poll() is not None: + builtins.__xonsh_active_job__ = None + _give_terminal_to(_shell_pgrp) # give terminal back to the shell def _clear_dead_jobs(): to_remove = set() - for tid in tasks: - obj = get_task(tid)["obj"] - if obj is None or obj.poll() is not None: - to_remove.add(tid) - for job in to_remove: - tasks.remove(job) - del builtins.__xonsh__.all_jobs[job] + for num, job in builtins.__xonsh_all_jobs__.items(): + obj = job['obj'] + if obj.poll() is not None: + to_remove.add(num) + for i in to_remove: + del builtins.__xonsh_all_jobs__[i] + if builtins.__xonsh_active_job__ == i: + builtins.__xonsh_active_job__ = None + if builtins.__xonsh_active_job__ is None: + 
_reactivate_job() + + +def _reactivate_job(): + if len(builtins.__xonsh_all_jobs__) == 0: + return + builtins.__xonsh_active_job__ = max(builtins.__xonsh_all_jobs__.items(), + key=lambda x: x[1]['started'])[0] + -def print_one_job(num, outfile=sys.stdout): +def print_one_job(num): """Print a line describing job number ``num``.""" try: - job = builtins.__xonsh__.all_jobs[num] + job = builtins.__xonsh_all_jobs__[num] except KeyError: return - pos = "+" if tasks[0] == num else "-" if tasks[1] == num else " " - status = job["status"] - cmd = [" ".join(i) if isinstance(i, list) else i for i in job["cmds"]] - cmd = " ".join(cmd) - pid = job["pids"][-1] - bg = " &" if job["bg"] else "" - print("[{}]{} {}: {}{} ({})".format(num, pos, status, cmd, bg, pid), file=outfile) + act = '*' if num == builtins.__xonsh_active_job__ else ' ' + status = job['status'] + cmd = [' '.join(i) if isinstance(i, list) else i for i in job['cmds']] + cmd = ' '.join(cmd) + pid = job['pids'][-1] + bg = ' &' if job['bg'] else '' + print('{}[{}] {}: {}{} ({})'.format(act, num, status, cmd, bg, pid)) def get_next_job_number(): @@ -269,72 +169,27 @@ def get_next_job_number(): """ _clear_dead_jobs() i = 1 - while i in builtins.__xonsh__.all_jobs: + while i in builtins.__xonsh_all_jobs__: i += 1 return i def add_job(info): - """Add a new job to the jobs dictionary.""" + """ + Add a new job to the jobs dictionary. + """ + info['started'] = time.time() + info['status'] = 'running' + _set_pgrp(info) num = get_next_job_number() - info["started"] = time.time() - info["status"] = "running" - tasks.appendleft(num) - builtins.__xonsh__.all_jobs[num] = info - if info["bg"] and builtins.__xonsh__.env.get("XONSH_INTERACTIVE"): + builtins.__xonsh_all_jobs__[num] = info + builtins.__xonsh_active_job__ = num + if info['bg']: print_one_job(num) -def clean_jobs(): - """Clean up jobs for exiting shell - - In non-interactive mode, kill all jobs. - - In interactive mode, check for suspended or background jobs, print a - warning if any exist, and return False. Otherwise, return True. - """ - jobs_clean = True - if builtins.__xonsh__.env["XONSH_INTERACTIVE"]: - _clear_dead_jobs() - - if builtins.__xonsh__.all_jobs: - global _last_exit_time - hist = builtins.__xonsh__.history - if hist is not None and len(hist.tss) > 0: - last_cmd_start = hist.tss[-1][0] - else: - last_cmd_start = None - - if _last_exit_time and last_cmd_start and _last_exit_time > last_cmd_start: - # Exit occurred after last command started, so it was called as - # part of the last command and is now being called again - # immediately. Kill jobs and exit without reminder about - # unfinished jobs in this case. - kill_all_jobs() - else: - if len(builtins.__xonsh__.all_jobs) > 1: - msg = "there are unfinished jobs" - else: - msg = "there is an unfinished job" - - if builtins.__xonsh__.env["SHELL_TYPE"] != "prompt_toolkit": - # The Ctrl+D binding for prompt_toolkit already inserts a - # newline - print() - print("xonsh: {}".format(msg), file=sys.stderr) - print("-" * 5, file=sys.stderr) - jobs([], stdout=sys.stderr) - print("-" * 5, file=sys.stderr) - print( - 'Type "exit" or press "ctrl-d" again to force quit.', - file=sys.stderr, - ) - jobs_clean = False - _last_exit_time = time.time() - else: - kill_all_jobs() - - return jobs_clean +def _default_sigint_handler(num, frame): + raise KeyboardInterrupt def kill_all_jobs(): @@ -342,83 +197,77 @@ def kill_all_jobs(): Send SIGKILL to all child processes (called when exiting xonsh). 
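
The fg implementation that follows (and bg after it) ultimately rests on three POSIX primitives: send SIGCONT, wait with WUNTRACED so a re-suspend is observable, and decode the wait status. A condensed sketch of that core, with the terminal handoff omitted:

    import os
    import signal

    def resume_foreground(pid):
        """Continue a stopped child, then wait until it exits or stops again."""
        os.kill(pid, signal.SIGCONT)          # what _continue() boils down to
        _, status = os.waitpid(pid, os.WUNTRACED)
        if os.WIFSTOPPED(status):             # Ctrl-Z: back to the job table
            return "stopped"
        if os.WIFSIGNALED(status):
            return "killed by signal %d" % os.WTERMSIG(status)
        return "exited with %d" % os.WEXITSTATUS(status)
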
""" _clear_dead_jobs() - for job in builtins.__xonsh__.all_jobs.values(): - _kill(job) + for job in builtins.__xonsh_all_jobs__.values(): + _kill(job['obj']) -def jobs(args, stdin=None, stdout=sys.stdout, stderr=None): +def jobs(args, stdin=None): """ xonsh command: jobs Display a list of all current jobs. """ _clear_dead_jobs() - for j in tasks: - print_one_job(j, outfile=stdout) + for j in sorted(builtins.__xonsh_all_jobs__): + print_one_job(j) return None, None -def resume_job(args, wording): +def fg(args, stdin=None): """ - used by fg and bg to resume a job either in the foreground or in the background. + xonsh command: fg + + Bring the currently active job to the foreground, or, if a single number is + given as an argument, bring that job to the foreground. """ _clear_dead_jobs() - if len(tasks) == 0: - return "", "There are currently no suspended jobs" - if len(args) == 0: - tid = tasks[0] # take the last manipulated task by default + # start active job in foreground + act = builtins.__xonsh_active_job__ + if act is None: + return '', 'Cannot bring nonexistent job to foreground.\n' elif len(args) == 1: try: - if args[0] == "+": # take the last manipulated task - tid = tasks[0] - elif args[0] == "-": # take the second to last manipulated task - tid = tasks[1] - else: - tid = int(args[0]) - except (ValueError, IndexError): - return "", "Invalid job: {}\n".format(args[0]) - - if tid not in builtins.__xonsh__.all_jobs: - return "", "Invalid job: {}\n".format(args[0]) + act = int(args[0]) + except ValueError: + return '', 'Invalid job: {}\n'.format(args[0]) + if act not in builtins.__xonsh_all_jobs__: + return '', 'Invalid job: {}\n'.format(args[0]) else: - return "", "{} expects 0 or 1 arguments, not {}\n".format(wording, len(args)) - - # Put this one on top of the queue - tasks.remove(tid) - tasks.appendleft(tid) - - job = get_task(tid) - job["bg"] = False - job["status"] = "running" - if builtins.__xonsh__.env.get("XONSH_INTERACTIVE"): - print_one_job(tid) - pipeline = job["pipeline"] - pipeline.resume(job) - - -@unthreadable -def fg(args, stdin=None): - """ - xonsh command: fg - - Bring the currently active job to the foreground, or, if a single number is - given as an argument, bring that job to the foreground. Additionally, - specify "+" for the most recent job and "-" for the second most recent job. - """ - return resume_job(args, wording="fg") + return '', 'fg expects 0 or 1 arguments, not {}\n'.format(len(args)) + builtins.__xonsh_active_job__ = act + job = builtins.__xonsh_all_jobs__[act] + job['bg'] = False + job['status'] = 'running' + print_one_job(act) + wait_for_active_job(_continue(job['obj'])) def bg(args, stdin=None): - """xonsh command: bg + """ + xonsh command: bg Resume execution of the currently active job in the background, or, if a single number is given as an argument, resume that job in the background. 
""" - res = resume_job(args, wording="bg") - if res is None: - curtask = get_task(tasks[0]) - curtask["bg"] = True - _continue(curtask) + _clear_dead_jobs() + if len(args) == 0: + # start active job in foreground + act = builtins.__xonsh_active_job__ + if act is None: + return '', 'Cannot send nonexistent job to background.\n' + elif len(args) == 1: + try: + act = int(args[0]) + except ValueError: + return '', 'Invalid job: {}\n'.format(args[0]) + if act not in builtins.__xonsh_all_jobs__: + return '', 'Invalid job: {}\n'.format(args[0]) else: - return res + return '', 'bg expects 0 or 1 arguments, not {}\n'.format(len(args)) + builtins.__xonsh_active_job__ = act + job = builtins.__xonsh_all_jobs__[act] + job['bg'] = True + job['status'] = 'running' + print_one_job(act) + wait_for_active_job(_continue(job['obj'])) diff --git a/xonsh/jsonutils.py b/xonsh/jsonutils.py deleted file mode 100644 index 0805d96..0000000 --- a/xonsh/jsonutils.py +++ /dev/null @@ -1,19 +0,0 @@ -"""Custom tools for managing JSON serialization / deserialization of xonsh -objects. -""" -import functools - -from xonsh.tools import EnvPath - - -@functools.singledispatch -def serialize_xonsh_json(val): - """JSON serializer for xonsh custom data structures. This is only - called when another normal JSON types are not found. - """ - return str(val) - - -@serialize_xonsh_json.register(EnvPath) -def _serialize_xonsh_json_env_path(val): - return val.paths diff --git a/xonsh/jupyter_kernel.py b/xonsh/jupyter_kernel.py deleted file mode 100644 index 59551cb..0000000 --- a/xonsh/jupyter_kernel.py +++ /dev/null @@ -1,478 +0,0 @@ -# -*- coding: utf-8 -*- -"""Hooks for Jupyter Xonsh Kernel.""" -import sys -import json -import hmac -import uuid -import errno -import hashlib -import datetime -import builtins -import threading -from pprint import pformat -from argparse import ArgumentParser -from collections.abc import Set - -import zmq -from zmq.eventloop import ioloop, zmqstream -from zmq.error import ZMQError - -from xonsh import __version__ as version -from xonsh.main import setup -from xonsh.completer import Completer -from xonsh.commands_cache import predict_true - - -MAX_SIZE = 8388608 # 8 Mb -DELIM = b"" - - -def dump_bytes(*args, **kwargs): - """Converts an object to JSON and returns the bytes.""" - return json.dumps(*args, **kwargs).encode("ascii") - - -def load_bytes(b): - """Converts bytes of JSON to an object.""" - return json.loads(b.decode("ascii")) - - -def bind(socket, connection, port): - """Binds a socket to a port, or a random port if needed. Returns the port.""" - if port <= 0: - return socket.bind_to_random_port(connection) - else: - socket.bind("{}:{}".format(connection, port)) - return port - - -class XonshKernel: - """Xonsh xernal for Jupyter""" - - implementation = "Xonsh " + version - implementation_version = version - language = "xonsh" - language_version = version.split(".")[:3] - banner = "Xonsh - Python-powered, cross-platform shell" - language_info = { - "name": "xonsh", - "version": version, - "pygments_lexer": "xonsh", - "codemirror_mode": "shell", - "mimetype": "text/x-sh", - "file_extension": ".xsh", - } - signature_schemes = {"hmac-sha256": hashlib.sha256} - - def __init__(self, debug_level=0, session_id=None, config=None, **kwargs): - """ - Parameters - ---------- - debug_level : int, optional - Integer from 0 (no debugging) to 3 (all debugging), default: 0. - session_id : str or None, optional - Unique string id representing the kernel session. If None, this will - be replaced with a random UUID. 
- config : dict or None, optional - Configuration dictionary to start server with. BY default will - search the command line for options (if given) or use default - configuration. - """ - self.debug_level = debug_level - self.session_id = str(uuid.uuid4()) if session_id is None else session_id - self._parser = None - self.config = self.make_default_config() if config is None else config - - self.exiting = False - self.execution_count = 1 - self.completer = Completer() - - @property - def parser(self): - if self._parser is None: - p = ArgumentParser("jupyter_kerenel") - p.add_argument("-f", dest="config_file", default=None) - self._parser = p - return self._parser - - def make_default_config(self): - """Provides default configuration""" - ns, unknown = self.parser.parse_known_args(sys.argv) - if ns.config_file is None: - self.dprint(1, "Starting xonsh kernel with default args...") - config = { - "control_port": 0, - "hb_port": 0, - "iopub_port": 0, - "ip": "127.0.0.1", - "key": str(uuid.uuid4()), - "shell_port": 0, - "signature_scheme": "hmac-sha256", - "stdin_port": 0, - "transport": "tcp", - } - else: - self.dprint(1, "Loading simple_kernel with args:", sys.argv) - self.dprint(1, "Reading config file {!r}...".format(ns.config_file)) - with open(ns.config_file) as f: - config = json.load(f) - return config - - def iopub_handler(self, message): - """Handles iopub requests.""" - self.dprint(2, "iopub received:", message) - - def control_handler(self, wire_message): - """Handles control requests""" - self.dprint(1, "control received:", wire_message) - identities, msg = self.deserialize_wire_message(wire_message) - if msg["header"]["msg_type"] == "shutdown_request": - self.shutdown() - - def stdin_handler(self, message): - self.dprint(2, "stdin received:", message) - - def start(self): - """Starts the server""" - ioloop.install() - connection = self.config["transport"] + "://" + self.config["ip"] - secure_key = self.config["key"].encode() - digestmod = self.signature_schemes[self.config["signature_scheme"]] - self.auth = hmac.HMAC(secure_key, digestmod=digestmod) - - # Heartbeat - ctx = zmq.Context() - self.heartbeat_socket = ctx.socket(zmq.REP) - self.config["hb_port"] = bind( - self.heartbeat_socket, connection, self.config["hb_port"] - ) - - # IOPub/Sub, aslo called SubSocketChannel in IPython sources - self.iopub_socket = ctx.socket(zmq.PUB) - self.config["iopub_port"] = bind( - self.iopub_socket, connection, self.config["iopub_port"] - ) - self.iopub_stream = zmqstream.ZMQStream(self.iopub_socket) - self.iopub_stream.on_recv(self.iopub_handler) - - # Control - self.control_socket = ctx.socket(zmq.ROUTER) - self.config["control_port"] = bind( - self.control_socket, connection, self.config["control_port"] - ) - self.control_stream = zmqstream.ZMQStream(self.control_socket) - self.control_stream.on_recv(self.control_handler) - - # Stdin: - self.stdin_socket = ctx.socket(zmq.ROUTER) - self.config["stdin_port"] = bind( - self.stdin_socket, connection, self.config["stdin_port"] - ) - self.stdin_stream = zmqstream.ZMQStream(self.stdin_socket) - self.stdin_stream.on_recv(self.stdin_handler) - - # Shell - self.shell_socket = ctx.socket(zmq.ROUTER) - self.config["shell_port"] = bind( - self.shell_socket, connection, self.config["shell_port"] - ) - self.shell_stream = zmqstream.ZMQStream(self.shell_socket) - self.shell_stream.on_recv(self.shell_handler) - - # start up configurtation - self.dprint(2, "Config:", json.dumps(self.config)) - self.dprint(1, "Starting loops...") - self.hb_thread = 
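The deleted ``xonsh/jsonutils.py`` a little further up uses ``functools.singledispatch`` as the ``default`` hook for ``json.dumps``: unknown types fall back to ``str``, while specific types (there, ``EnvPath``) register their own converters. A generic sketch of the same pattern, with ``pathlib.PurePath`` standing in for the xonsh-specific type::

    import functools
    import json
    import pathlib

    @functools.singledispatch
    def serialize(val):
        # Fallback for anything the stdlib encoder cannot handle.
        return str(val)

    @serialize.register(pathlib.PurePath)
    def _serialize_path(val):
        return list(val.parts)    # mirrors EnvPath -> list of paths

    # json.dumps only consults ``default`` for non-native objects.
    print(json.dumps({"home": pathlib.Path("/tmp")}, default=serialize))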
threading.Thread(target=self.heartbeat_loop) - self.hb_thread.daemon = True - self.hb_thread.start() - self.dprint(1, "Ready! Listening...") - ioloop.IOLoop.instance().start() - - def shutdown(self): - """Shutsdown the kernel""" - self.exiting = True - ioloop.IOLoop.instance().stop() - - def dprint(self, level, *args, **kwargs): - """Print but with debug information.""" - if level <= self.debug_level: - print("DEBUG" + str(level) + ":", file=sys.__stdout__, *args, **kwargs) - sys.__stdout__.flush() - - def sign(self, messages): - """Sign a message list with a secure signature.""" - h = self.auth.copy() - for m in messages: - h.update(m) - return h.hexdigest().encode("ascii") - - def new_header(self, message_type): - """Make a new header""" - return { - "date": datetime.datetime.now().isoformat(), - "msg_id": str(uuid.uuid4()), - "username": "kernel", - "session": self.session_id, - "msg_type": message_type, - "version": "5.0", - } - - def send( - self, - stream, - message_type, - content=None, - parent_header=None, - metadata=None, - identities=None, - ): - """Send data to the client via a stream""" - header = self.new_header(message_type) - if content is None: - content = {} - if parent_header is None: - parent_header = {} - if metadata is None: - metadata = {} - - messages = list(map(dump_bytes, [header, parent_header, metadata, content])) - signature = self.sign(messages) - parts = [DELIM, signature] + messages - if identities: - parts = identities + parts - self.dprint(3, "send parts:", parts) - stream.send_multipart(parts) - if isinstance(stream, zmqstream.ZMQStream): - stream.flush() - - def deserialize_wire_message(self, wire_message): - """Split the routing prefix and message frames from a message on the wire""" - delim_idx = wire_message.index(DELIM) - identities = wire_message[:delim_idx] - m_signature = wire_message[delim_idx + 1] - msg_frames = wire_message[delim_idx + 2 :] - - keys = ("header", "parent_header", "metadata", "content") - m = {k: load_bytes(v) for k, v in zip(keys, msg_frames)} - check_sig = self.sign(msg_frames) - if check_sig != m_signature: - raise ValueError("Signatures do not match") - return identities, m - - def run_thread(self, loop, name): - """Run main thread""" - self.dprint(2, "Starting loop for {name!r}...".format(name=name)) - while not self.exiting: - self.dprint(2, "{} Loop!".format(name)) - try: - loop.start() - except ZMQError as e: - self.dprint(1, "{} ZMQError!\n {}".format(name, e)) - if e.errno == errno.EINTR: - continue - else: - raise - except Exception: - self.dprint(2, "{} Exception!".format(name)) - if self.exiting: - break - else: - raise - else: - self.dprint(2, "{} Break!".format(name)) - break - - def heartbeat_loop(self): - """Run heartbeat""" - self.dprint(2, "Starting heartbeat loop...") - while not self.exiting: - self.dprint(3, ".", end="") - try: - zmq.device(zmq.FORWARDER, self.heartbeat_socket, self.heartbeat_socket) - except zmq.ZMQError as e: - if e.errno == errno.EINTR: - continue - else: - raise - else: - break - - def shell_handler(self, message): - """Dispatch shell messages to their handlers""" - self.dprint(1, "received:", message) - identities, msg = self.deserialize_wire_message(message) - handler = getattr(self, "handle_" + msg["header"]["msg_type"], None) - if handler is None: - self.dprint(0, "unknown message type:", msg["header"]["msg_type"]) - return - handler(msg, identities) - - def handle_execute_request(self, message, identities): - """Handle execute request messages.""" - self.dprint(2, "Xonsh Kernel 
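Every wire message the kernel sends or receives is authenticated by hashing each frame with a key from the connection file, which is what ``sign`` and ``deserialize_wire_message`` above do. A standalone sketch of the scheme (the key and frames here are made up)::

    import hashlib
    import hmac

    auth = hmac.HMAC(b"connection-file-key", digestmod=hashlib.sha256)

    def sign(frames):
        # Copy the keyed state, feed every frame in order, emit the
        # digest as hex ASCII -- same as the kernel's sign().
        h = auth.copy()
        for frame in frames:
            h.update(frame)
        return h.hexdigest().encode("ascii")

    frames = [b'{"msg_type": "status"}', b"{}", b"{}", b"{}"]
    sig = sign(frames)
    # The receiver recomputes the digest and compares (ideally with
    # hmac.compare_digest, to avoid timing leaks).
    assert hmac.compare_digest(sign(frames), sig)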
Executing:", pformat(message["content"]["code"])) - # Start by sending busy signal - content = {"execution_state": "busy"} - self.send(self.iopub_stream, "status", content, parent_header=message["header"]) - - # confirm the input that we are executing - content = { - "execution_count": self.execution_count, - "code": message["content"]["code"], - } - self.send( - self.iopub_stream, "execute_input", content, parent_header=message["header"] - ) - - # execute the code - metadata = { - "dependencies_met": True, - "engine": self.session_id, - "status": "ok", - "started": datetime.datetime.now().isoformat(), - } - content = self.do_execute(parent_header=message["header"], **message["content"]) - self.send( - self.shell_stream, - "execute_reply", - content, - metadata=metadata, - parent_header=message["header"], - identities=identities, - ) - self.execution_count += 1 - - # once we are done, send a signal that we are idle - content = {"execution_state": "idle"} - self.send(self.iopub_stream, "status", content, parent_header=message["header"]) - - def do_execute( - self, - code="", - silent=False, - store_history=True, - user_expressions=None, - allow_stdin=False, - parent_header=None, - **kwargs - ): - """Execute user code.""" - if len(code.strip()) == 0: - return { - "status": "ok", - "execution_count": self.execution_count, - "payload": [], - "user_expressions": {}, - } - shell = builtins.__xonsh__.shell - hist = builtins.__xonsh__.history - try: - shell.default(code, self, parent_header) - interrupted = False - except KeyboardInterrupt: - interrupted = True - - if interrupted: - return {"status": "abort", "execution_count": self.execution_count} - - rtn = 0 if (hist is None or len(hist) == 0) else hist.rtns[-1] - if 0 < rtn: - message = { - "status": "error", - "execution_count": self.execution_count, - "ename": "", - "evalue": str(rtn), - "traceback": [], - } - else: - message = { - "status": "ok", - "execution_count": self.execution_count, - "payload": [], - "user_expressions": {}, - } - return message - - def _respond_in_chunks(self, name, s, chunksize=1024, parent_header=None): - if s is None: - return - n = len(s) - if n == 0: - return - lower = range(0, n, chunksize) - upper = range(chunksize, n + chunksize, chunksize) - for l, u in zip(lower, upper): - response = {"name": name, "text": s[l:u]} - self.send( - self.iopub_socket, "stream", response, parent_header=parent_header - ) - - def handle_complete_request(self, message, identities): - """Handles kernel info requests.""" - content = self.do_complete( - message["content"]["code"], message["content"]["cursor_pos"] - ) - self.send( - self.shell_stream, - "complete_reply", - content, - parent_header=message["header"], - identities=identities, - ) - - def do_complete(self, code, pos): - """Get completions.""" - shell = builtins.__xonsh__.shell - line = code.split("\n")[-1] - line = builtins.aliases.expand_alias(line) - prefix = line.split(" ")[-1] - endidx = pos - begidx = pos - len(prefix) - rtn, _ = self.completer.complete(prefix, line, begidx, endidx, shell.ctx) - if isinstance(rtn, Set): - rtn = list(rtn) - message = { - "matches": rtn, - "cursor_start": begidx, - "cursor_end": endidx, - "metadata": {}, - "status": "ok", - } - return message - - def handle_kernel_info_request(self, message, identities): - """Handles kernel info requests.""" - content = { - "protocol_version": "5.0", - "ipython_version": [1, 1, 0, ""], - "language": self.language, - "language_version": self.language_version, - "implementation": self.implementation, - 
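``_respond_in_chunks`` above slices long output so no single iopub ``stream`` message grows unboundedly; each slice goes out as its own message. The slicing itself is just a fixed-stride window::

    def chunks(s, chunksize=1024):
        # Fixed-stride slices; the kernel wraps each one in its own
        # "stream" message on the iopub socket.
        for start in range(0, len(s), chunksize):
            yield s[start:start + chunksize]

    text = "x" * 2500
    assert [len(c) for c in chunks(text)] == [1024, 1024, 452]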
"implementation_version": self.implementation_version, - "language_info": self.language_info, - "banner": self.banner, - } - self.send( - self.shell_stream, - "kernel_info_reply", - content, - parent_header=message["header"], - identities=identities, - ) - - -if __name__ == "__main__": - setup( - shell_type="jupyter", - env={"PAGER": "cat"}, - aliases={"less": "cat"}, - xontribs=["coreutils"], - threadable_predictors={"git": predict_true, "man": predict_true}, - ) - if builtins.__xonsh__.commands_cache.is_only_functional_alias("cat"): - # this is needed if the underlying system doesn't have cat - # we supply our own, because we can - builtins.aliases["cat"] = "xonsh-cat" - builtins.__xonsh__.env["PAGER"] = "xonsh-cat" - shell = builtins.__xonsh__.shell - kernel = shell.kernel = XonshKernel() - kernel.start() diff --git a/xonsh/jupyter_shell.py b/xonsh/jupyter_shell.py deleted file mode 100644 index 922befa..0000000 --- a/xonsh/jupyter_shell.py +++ /dev/null @@ -1,144 +0,0 @@ -"""An interactive shell for the Jupyter kernel.""" -import io -import sys -import builtins - -from xonsh.base_shell import BaseShell - - -class StdJupyterRedirectBuf(io.RawIOBase): - """Redirects standard I/O buffers to the Jupyter kernel.""" - - def __init__(self, redirect): - self.redirect = redirect - self.encoding = redirect.encoding - self.errors = redirect.errors - - def fileno(self): - """Returns the file descriptor of the std buffer.""" - return self.redirect.fileno() - - def seek(self, offset, whence=io.SEEK_SET): - """Sets the location in both the stdbuf and the membuf.""" - raise io.UnsupportedOperation("cannot seek Jupyter redirect") - - def truncate(self, size=None): - """Truncate both buffers.""" - raise io.UnsupportedOperation("cannot truncate Jupyter redirect") - - def readinto(self, b): - """Read bytes into buffer from both streams.""" - raise io.UnsupportedOperation("cannot read into Jupyter redirect") - - def write(self, b): - """Write bytes to kernel.""" - s = b if isinstance(b, str) else b.decode(self.encoding, self.errors) - self.redirect.write(s) - - -class StdJupyterRedirect(io.TextIOBase): - """Redirects a standard I/O stream to the Jupyter kernel.""" - - def __init__(self, name, kernel, parent_header=None): - """ - Parameters - ---------- - name : str - The name of the buffer in the sys module, e.g. 'stdout'. 
- kernel : XonshKernel - Instance of a Jupyter kernel - parent_header : dict or None, optional - parent header information to pass along with the kernel - """ - self._name = name - self.kernel = kernel - self.parent_header = parent_header - - self.std = getattr(sys, name) - self.buffer = StdJupyterRedirectBuf(self) - setattr(sys, name, self) - - @property - def encoding(self): - """The encoding of the stream""" - env = builtins.__xonsh__.env - return getattr(self.std, "encoding", env.get("XONSH_ENCODING")) - - @property - def errors(self): - """The encoding errors of the stream""" - env = builtins.__xonsh__.env - return getattr(self.std, "errors", env.get("XONSH_ENCODING_ERRORS")) - - @property - def newlines(self): - """The newlines of the standard buffer.""" - return self.std.newlines - - def _replace_std(self): - std = self.std - if std is None: - return - setattr(sys, self._name, std) - self.std = None - - def __del__(self): - self._replace_std() - - def close(self): - """Restores the original std stream.""" - self._replace_std() - - def __enter__(self): - return self - - def __exit__(self, *args, **kwargs): - self.close() - - def write(self, s): - """Writes data to the original kernel stream.""" - self.kernel._respond_in_chunks(self._name, s, parent_header=self.parent_header) - - def flush(self): - """Flushes kernel iopub_stream.""" - self.kernel.iopub_stream.flush() - - def fileno(self): - """Tunnel fileno() calls to the std stream.""" - return self.std.fileno() - - def seek(self, offset, whence=io.SEEK_SET): - """Seek to a location.""" - raise io.UnsupportedOperation("cannot seek Jupyter redirect") - - def truncate(self, size=None): - """Truncate the streams.""" - raise io.UnsupportedOperation("cannot truncate Jupyter redirect") - - def detach(self): - """This operation is not supported.""" - raise io.UnsupportedOperation("cannot detach a Jupyter redirect") - - def read(self, size=None): - """Read from the stream""" - raise io.UnsupportedOperation("cannot read a Jupyter redirect") - - def readline(self, size=-1): - """Read a line.""" - raise io.UnsupportedOperation("cannot read a line from a Jupyter redirect") - - -class JupyterShell(BaseShell): - """A shell for the Jupyter kernel.""" - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.kernel = None - - def default(self, line, kernel, parent_header=None): - """Executes code, but redirects output to Jupyter client""" - stdout = StdJupyterRedirect("stdout", kernel, parent_header) - stderr = StdJupyterRedirect("stderr", kernel, parent_header) - with stdout, stderr: - rtn = super().default(line) - return rtn diff --git a/xonsh/lazyasd.py b/xonsh/lazyasd.py deleted file mode 100644 index 9b99ee4..0000000 --- a/xonsh/lazyasd.py +++ /dev/null @@ -1,351 +0,0 @@ -"""Lazy and self destructive containers for speeding up module import.""" -# Copyright 2015-2016, the xonsh developers. All rights reserved. -import os -import sys -import time -import types -import builtins -import threading -import importlib -import importlib.util -import collections.abc as cabc - -__version__ = "0.1.3" - - -class LazyObject(object): - def __init__(self, load, ctx, name): - """Lazily loads an object via the load function the first time an - attribute is accessed. Once loaded it will replace itself in the - provided context (typically the globals of the call site) with the - given name. 
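``StdJupyterRedirect`` above works by swapping itself in for ``sys.stdout`` or ``sys.stderr`` and forwarding writes to the kernel, restoring the original stream on close. A much-reduced sketch of the swap-and-restore pattern (the ``Capture`` class is illustrative, not part of xonsh)::

    import io
    import sys

    class Capture(io.TextIOBase):
        """Swap ourselves in for a std stream by name, collect writes,
        and put the original stream back on close."""

        def __init__(self, name):
            self._name = name              # e.g. 'stdout'
            self._old = getattr(sys, name)
            self.parts = []
            setattr(sys, name, self)

        def write(self, s):
            self.parts.append(s)           # a kernel would forward this
            return len(s)

        def close(self):
            setattr(sys, self._name, self._old)
            super().close()

    with Capture("stdout") as cap:
        print("hello")
    assert "".join(cap.parts) == "hello\n"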
- - For example, you can prevent the compilation of a regular expression - until it is actually used:: - - DOT = LazyObject((lambda: re.compile('.')), globals(), 'DOT') - - Parameters - ---------- - load : function with no arguments - A loader function that performs the actual object construction. - ctx : Mapping - Context to replace the LazyObject instance in - with the object returned by load(). - name : str - Name in the context to give the loaded object. This *should* - be the name on the LHS of the assignment. - """ - self._lasdo = {"loaded": False, "load": load, "ctx": ctx, "name": name} - - def _lazy_obj(self): - d = self._lasdo - if d["loaded"]: - obj = d["obj"] - else: - obj = d["load"]() - d["ctx"][d["name"]] = d["obj"] = obj - d["loaded"] = True - return obj - - def __getattribute__(self, name): - if name == "_lasdo" or name == "_lazy_obj": - return super().__getattribute__(name) - obj = self._lazy_obj() - return getattr(obj, name) - - def __bool__(self): - obj = self._lazy_obj() - return bool(obj) - - def __iter__(self): - obj = self._lazy_obj() - yield from obj - - def __getitem__(self, item): - obj = self._lazy_obj() - return obj[item] - - def __setitem__(self, key, value): - obj = self._lazy_obj() - obj[key] = value - - def __delitem__(self, item): - obj = self._lazy_obj() - del obj[item] - - def __call__(self, *args, **kwargs): - obj = self._lazy_obj() - return obj(*args, **kwargs) - - def __lt__(self, other): - obj = self._lazy_obj() - return obj < other - - def __le__(self, other): - obj = self._lazy_obj() - return obj <= other - - def __eq__(self, other): - obj = self._lazy_obj() - return obj == other - - def __ne__(self, other): - obj = self._lazy_obj() - return obj != other - - def __gt__(self, other): - obj = self._lazy_obj() - return obj > other - - def __ge__(self, other): - obj = self._lazy_obj() - return obj >= other - - def __hash__(self): - obj = self._lazy_obj() - return hash(obj) - - def __or__(self, other): - obj = self._lazy_obj() - return obj | other - - def __str__(self): - return str(self._lazy_obj()) - - def __repr__(self): - return repr(self._lazy_obj()) - - -def lazyobject(f): - """Decorator for constructing lazy objects from a function.""" - return LazyObject(f, f.__globals__, f.__name__) - - -class LazyDict(cabc.MutableMapping): - def __init__(self, loaders, ctx, name): - """Dictionary like object that lazily loads its values from an initial - dict of key-loader function pairs. Each key is loaded when its value - is first accessed. Once fully loaded, this object will replace itself - in the provided context (typically the globals of the call site) with - the given name. - - For example, you can prevent the compilation of a bunch of regular - expressions until they are actually used:: - - RES = LazyDict({ - 'dot': lambda: re.compile('.'), - 'all': lambda: re.compile('.*'), - 'two': lambda: re.compile('..'), - }, globals(), 'RES') - - Parameters - ---------- - loaders : Mapping of keys to functions with no arguments - A mapping of loader function that performs the actual value - construction upon access. - ctx : Mapping - Context to replace the LazyDict instance in - with the the fully loaded mapping. - name : str - Name in the context to give the loaded mapping. This *should* - be the name on the LHS of the assignment. 
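The core trick of ``LazyObject`` is self-replacement: the first attribute access runs the loader and rebinds the name in the caller's globals, so later uses pay no proxy cost at all. A compact re-creation (simplified; the real class also proxies item access, comparisons, and calls)::

    import re

    class Lazy:
        def __init__(self, load, ctx, name):
            self._load, self._ctx, self._name = load, ctx, name
            self._obj = None

        def _resolve(self):
            if self._obj is None:
                # Build once, then replace ourselves in the namespace.
                self._obj = self._ctx[self._name] = self._load()
            return self._obj

        def __getattr__(self, attr):
            return getattr(self._resolve(), attr)

    DOT = Lazy(lambda: re.compile(r"\."), globals(), "DOT")
    assert DOT.match(".")                          # first touch compiles
    assert not isinstance(globals()["DOT"], Lazy)  # name was rebound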
- """ - self._loaders = loaders - self._ctx = ctx - self._name = name - self._d = type(loaders)() # make sure to return the same type - - def _destruct(self): - if len(self._loaders) == 0: - self._ctx[self._name] = self._d - - def __getitem__(self, key): - d = self._d - if key in d: - val = d[key] - else: - # pop will raise a key error for us - loader = self._loaders.pop(key) - d[key] = val = loader() - self._destruct() - return val - - def __setitem__(self, key, value): - self._d[key] = value - if key in self._loaders: - del self._loaders[key] - self._destruct() - - def __delitem__(self, key): - if key in self._d: - del self._d[key] - else: - del self._loaders[key] - self._destruct() - - def __iter__(self): - yield from (set(self._d.keys()) | set(self._loaders.keys())) - - def __len__(self): - return len(self._d) + len(self._loaders) - - -def lazydict(f): - """Decorator for constructing lazy dicts from a function.""" - return LazyDict(f, f.__globals__, f.__name__) - - -class LazyBool(object): - def __init__(self, load, ctx, name): - """Boolean like object that lazily computes it boolean value when it is - first asked. Once loaded, this result will replace itself - in the provided context (typically the globals of the call site) with - the given name. - - For example, you can prevent the complex boolean until it is actually - used:: - - ALIVE = LazyDict(lambda: not DEAD, globals(), 'ALIVE') - - Parameters - ---------- - load : function with no arguments - A loader function that performs the actual boolean evaluation. - ctx : Mapping - Context to replace the LazyBool instance in - with the the fully loaded mapping. - name : str - Name in the context to give the loaded mapping. This *should* - be the name on the LHS of the assignment. - """ - self._load = load - self._ctx = ctx - self._name = name - self._result = None - - def __bool__(self): - if self._result is None: - res = self._ctx[self._name] = self._result = self._load() - else: - res = self._result - return res - - -def lazybool(f): - """Decorator for constructing lazy booleans from a function.""" - return LazyBool(f, f.__globals__, f.__name__) - - -# -# Background module loaders -# - - -class BackgroundModuleProxy(types.ModuleType): - """Proxy object for modules loaded in the background that block attribute - access until the module is loaded.. 
- """ - - def __init__(self, modname): - self.__dct__ = {"loaded": False, "modname": modname} - - def __getattribute__(self, name): - passthrough = frozenset({"__dct__", "__class__", "__spec__"}) - if name in passthrough: - return super().__getattribute__(name) - dct = self.__dct__ - modname = dct["modname"] - if dct["loaded"]: - mod = sys.modules[modname] - else: - delay_types = (BackgroundModuleProxy, type(None)) - while isinstance(sys.modules.get(modname, None), delay_types): - time.sleep(0.001) - mod = sys.modules[modname] - dct["loaded"] = True - # some modules may do construction after import, give them a second - stall = 0 - while not hasattr(mod, name) and stall < 1000: - stall += 1 - time.sleep(0.001) - return getattr(mod, name) - - -class BackgroundModuleLoader(threading.Thread): - """Thread to load modules in the background.""" - - def __init__(self, name, package, replacements, *args, **kwargs): - super().__init__(*args, **kwargs) - self.daemon = True - self.name = name - self.package = package - self.replacements = replacements - self.start() - - def run(self): - # wait for other modules to stop being imported - # We assume that module loading is finished when sys.modules doesn't - # get longer in 5 consecutive 1ms waiting steps - counter = 0 - last = -1 - while counter < 5: - new = len(sys.modules) - if new == last: - counter += 1 - else: - last = new - counter = 0 - time.sleep(0.001) - # now import module properly - modname = importlib.util.resolve_name(self.name, self.package) - if isinstance(sys.modules[modname], BackgroundModuleProxy): - del sys.modules[modname] - mod = importlib.import_module(self.name, package=self.package) - for targname, varname in self.replacements.items(): - if targname in sys.modules: - targmod = sys.modules[targname] - setattr(targmod, varname, mod) - - -def load_module_in_background( - name, package=None, debug="DEBUG", env=None, replacements=None -): - """Entry point for loading modules in background thread. - - Parameters - ---------- - name : str - Module name to load in background thread. - package : str or None, optional - Package name, has the same meaning as in importlib.import_module(). - debug : str, optional - Debugging symbol name to look up in the environment. - env : Mapping or None, optional - Environment this will default to __xonsh__.env, if available, and - os.environ otherwise. - replacements : Mapping or None, optional - Dictionary mapping fully qualified module names (eg foo.bar.baz) that - import the lazily loaded module, with the variable name in that - module. For example, suppose that foo.bar imports module a as b, - this dict is then {'foo.bar': 'b'}. - - Returns - ------- - module : ModuleType - This is either the original module that is found in sys.modules or - a proxy module that will block until delay attribute access until the - module is fully loaded. 
- """ - modname = importlib.util.resolve_name(name, package) - if modname in sys.modules: - return sys.modules[modname] - if env is None: - xonsh_obj = getattr(builtins, "__xonsh__", None) - env = os.environ if xonsh_obj is None else getattr(xonsh_obj, "env", os.environ) - if env.get(debug, None): - mod = importlib.import_module(name, package=package) - return mod - proxy = sys.modules[modname] = BackgroundModuleProxy(modname) - BackgroundModuleLoader(name, package, replacements or {}) - return proxy diff --git a/xonsh/lazyimps.py b/xonsh/lazyimps.py deleted file mode 100644 index 2bb48b9..0000000 --- a/xonsh/lazyimps.py +++ /dev/null @@ -1,85 +0,0 @@ -"""Lazy imports that may apply across the xonsh package.""" -import importlib - -from xonsh.platform import ON_WINDOWS, ON_DARWIN -from xonsh.lazyasd import LazyObject, lazyobject - -pygments = LazyObject( - lambda: importlib.import_module("pygments"), globals(), "pygments" -) -pyghooks = LazyObject( - lambda: importlib.import_module("xonsh.pyghooks"), globals(), "pyghooks" -) - - -@lazyobject -def pty(): - if ON_WINDOWS: - return - else: - return importlib.import_module("pty") - - -@lazyobject -def termios(): - if ON_WINDOWS: - return - else: - return importlib.import_module("termios") - - -@lazyobject -def fcntl(): - if ON_WINDOWS: - return - else: - return importlib.import_module("fcntl") - - -@lazyobject -def tty(): - if ON_WINDOWS: - return - else: - return importlib.import_module("tty") - - -@lazyobject -def _winapi(): - if ON_WINDOWS: - import _winapi as m - else: - m = None - return m - - -@lazyobject -def msvcrt(): - if ON_WINDOWS: - import msvcrt as m - else: - m = None - return m - - -@lazyobject -def winutils(): - if ON_WINDOWS: - import xonsh.winutils as m - else: - m = None - return m - - -@lazyobject -def macutils(): - if ON_DARWIN: - import xonsh.macutils as m - else: - m = None - return m - - -@lazyobject -def terminal256(): - return importlib.import_module("pygments.formatters.terminal256") diff --git a/xonsh/lazyjson.py b/xonsh/lazyjson.py index bc69ea1..2615c13 100644 --- a/xonsh/lazyjson.py +++ b/xonsh/lazyjson.py @@ -1,56 +1,60 @@ -# -*- coding: utf-8 -*- """Implements a lazy JSON file class that wraps around json data.""" import io -import json import weakref -import contextlib -import collections.abc as cabc +from contextlib import contextmanager +from collections import Mapping, Sequence + +try: + import simplejson as json +except ImportError: + import json + +from xonsh.tools import string_types def _to_json_with_size(obj, offset=0, sort_keys=False): - if isinstance(obj, str): + if isinstance(obj, string_types): s = json.dumps(obj) o = offset n = size = len(s.encode()) # size in bytes - elif isinstance(obj, cabc.Mapping): - s = "{" + elif isinstance(obj, Mapping): + s = '{' j = offset + 1 o = {} size = {} items = sorted(obj.items()) if sort_keys else obj.items() for key, val in items: - s_k, o_k, n_k, size_k = _to_json_with_size( - key, offset=j, sort_keys=sort_keys - ) - s += s_k + ": " + s_k, o_k, n_k, size_k = _to_json_with_size(key, offset=j, + sort_keys=sort_keys) + s += s_k + ': ' j += n_k + 2 - s_v, o_v, n_v, size_v = _to_json_with_size( - val, offset=j, sort_keys=sort_keys - ) + s_v, o_v, n_v, size_v = _to_json_with_size(val, offset=j, + sort_keys=sort_keys) o[key] = o_v size[key] = size_v - s += s_v + ", " + s += s_v + ', ' j += n_v + 2 - if s.endswith(", "): + if s.endswith(', '): s = s[:-2] - s += "}\n" + s += '}\n' n = len(s) - o["__total__"] = offset - size["__total__"] = n - elif isinstance(obj, 
cabc.Sequence): - s = "[" + o['__total__'] = offset + size['__total__'] = n + elif isinstance(obj, Sequence): + s = '[' j = offset + 1 o = [] size = [] for x in obj: - s_x, o_x, n_x, size_x = _to_json_with_size(x, offset=j, sort_keys=sort_keys) + s_x, o_x, n_x, size_x = _to_json_with_size(x, offset=j, + sort_keys=sort_keys) o.append(o_x) size.append(size_x) - s += s_x + ", " + s += s_x + ', ' j += n_x + 2 - if s.endswith(", "): + if s.endswith(', '): s = s[:-2] - s += "]\n" + s += ']\n' n = len(s) o.append(offset) size.append(n) @@ -65,11 +69,12 @@ def index(obj, sort_keys=False): """Creates an index for a JSON file.""" idx = {} json_obj = _to_json_with_size(obj, sort_keys=sort_keys) - s, idx["offsets"], _, idx["sizes"] = json_obj + s, idx['offsets'], _, idx['sizes'] = json_obj return s, idx -JSON_FORMAT = """{{"locs": [{iloc:>10}, {ilen:>10}, {dloc:>10}, {dlen:>10}], +JSON_FORMAT = \ +"""{{"locs": [{iloc:>10}, {ilen:>10}, {dloc:>10}, {dlen:>10}], "index": {index}, "data": {data} }} @@ -84,19 +89,18 @@ def dumps(obj, sort_keys=False): ilen = len(jdx) dloc = iloc + ilen + 11 dlen = len(data) - s = JSON_FORMAT.format( - index=jdx, data=data, iloc=iloc, ilen=ilen, dloc=dloc, dlen=dlen - ) + s = JSON_FORMAT.format(index=jdx, data=data, iloc=iloc, ilen=ilen, + dloc=dloc, dlen=dlen) return s -def ljdump(obj, fp, sort_keys=False): +def dump(obj, fp, sort_keys=False): """Dumps an object to JSON file.""" s = dumps(obj, sort_keys=sort_keys) fp.write(s) -class LJNode(cabc.Mapping, cabc.Sequence): +class Node(Mapping, Sequence): """A proxy node for JSON nodes. Acts as both sequence and mapping.""" def __init__(self, offsets, sizes, root): @@ -112,8 +116,8 @@ def __init__(self, offsets, sizes, root): self.offsets = offsets self.sizes = sizes self.root = root - self.is_mapping = isinstance(self.offsets, cabc.Mapping) - self.is_sequence = isinstance(self.offsets, cabc.Sequence) + self.is_mapping = isinstance(self.offsets, Mapping) + self.is_sequence = isinstance(self.offsets, Sequence) def __len__(self): # recall that for maps, the '__total__' key is added and for @@ -123,8 +127,8 @@ def __len__(self): def load(self): """Returns the Python data structure represented by the node.""" if self.is_mapping: - offset = self.offsets["__total__"] - size = self.sizes["__total__"] + offset = self.offsets['__total__'] + size = self.sizes['__total__'] elif self.is_sequence: offset = self.offsets[-1] size = self.sizes[-1] @@ -135,18 +139,18 @@ def load(self): def _load_or_node(self, offset, size): if isinstance(offset, int): - with self.root._open(newline="\n") as f: + with self.root._open(newline='\n') as f: f.seek(self.root.dloc + offset) s = f.read(size) val = json.loads(s) - elif isinstance(offset, (cabc.Mapping, cabc.Sequence)): - val = LJNode(offset, size, self.root) + elif isinstance(offset, (Mapping, Sequence)): + val = Node(offset, size, self.root) else: - raise TypeError("incorrect types for offset node") + raise TypeError('incorrect types for offset node') return val def _getitem_mapping(self, key): - if key == "__total__": + if key == '__total__': raise KeyError('"__total__" is a special LazyJSON key!') offset = self.offsets[key] size = self.sizes[key] @@ -157,9 +161,10 @@ def _getitem_sequence(self, key): rtn = self._load_or_node(self.offsets[key], self.sizes[key]) elif isinstance(key, slice): key = slice(*key.indices(len(self))) - rtn = list(map(self._load_or_node, self.offsets[key], self.sizes[key])) + rtn = list(map(self._load_or_node, self.offsets[key], + self.sizes[key])) else: - raise 
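The ``BackgroundModuleProxy``/``BackgroundModuleLoader`` pair shown a few screens up imports slow modules on a daemon thread and makes callers block only when they first need the module. The essential mechanics, reduced to an event-guarded import (the helper name here is made up)::

    import importlib
    import sys
    import threading

    def import_in_background(name):
        done = threading.Event()

        def worker():
            importlib.import_module(name)
            done.set()

        threading.Thread(target=worker, daemon=True).start()

        def get():
            done.wait()            # block only when first needed
            return sys.modules[name]
        return get

    get_json = import_in_background("json")
    # ... other startup work proceeds while json imports ...
    assert get_json().loads("[1, 2]") == [1, 2]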
TypeError("only integer indexing available") + raise TypeError('only integer indexing available') return rtn def __getitem__(self, key): @@ -174,7 +179,7 @@ def __getitem__(self, key): def __iter__(self): if self.is_mapping: keys = set(self.offsets.keys()) - keys.discard("__total__") + keys.discard('__total__') yield from iter(keys) elif self.is_sequence: i = 0 @@ -186,7 +191,7 @@ def __iter__(self): raise NotImplementedError -class LazyJSON(LJNode): +class LazyJSON(Node): """Represents a lazy json file. Can be used like a normal Python dict or list. """ @@ -201,12 +206,12 @@ def __init__(self, f, reopen=True): """ self._f = f self.reopen = reopen - if not reopen and isinstance(f, str): - self._f = open(f, "r", newline="\n") + if not reopen and isinstance(f, string_types): + self._f = open(f, 'r', newline='\n') self._load_index() self.root = weakref.proxy(self) - self.is_mapping = isinstance(self.offsets, cabc.Mapping) - self.is_sequence = isinstance(self.offsets, cabc.Sequence) + self.is_mapping = isinstance(self.offsets, Mapping) + self.is_sequence = isinstance(self.offsets, Sequence) def __del__(self): self.close() @@ -214,14 +219,11 @@ def __del__(self): def close(self): """Close the file handle, if appropriate.""" if not self.reopen and isinstance(self._f, io.IOBase): - try: - self._f.close() - except OSError: - pass + self._f.close() - @contextlib.contextmanager + @contextmanager def _open(self, *args, **kwargs): - if self.reopen and isinstance(self._f, str): + if self.reopen and isinstance(self._f, string_types): f = open(self._f, *args, **kwargs) yield f f.close() @@ -230,7 +232,7 @@ def _open(self, *args, **kwargs): def _load_index(self): """Loads the index from the start of the file.""" - with self._open(newline="\n") as f: + with self._open(newline='\n') as f: # read in the location data f.seek(9) locs = f.read(48) @@ -240,11 +242,12 @@ def _load_index(self): f.seek(self.iloc) idx = f.read(self.ilen) idx = json.loads(idx) - self.offsets = idx["offsets"] - self.sizes = idx["sizes"] + self.offsets = idx['offsets'] + self.sizes = idx['sizes'] def __enter__(self): return self def __exit__(self, exc_type, exc_value, traceback): self.close() + diff --git a/xonsh/lexer.py b/xonsh/lexer.py index 2ebc490..2650250 100644 --- a/xonsh/lexer.py +++ b/xonsh/lexer.py @@ -1,153 +1,283 @@ -# -*- coding: utf-8 -*- -"""Lexer for xonsh code. - -Written using a hybrid of ``tokenize`` and PLY. """ -import io +Lexer for xonsh code, written using a hybrid of ``tokenize`` and PLY +""" import re +import sys +import tokenize + +from io import BytesIO +from keyword import kwlist + +from ply import lex +from ply.lex import TOKEN, LexToken + +from xonsh.tools import VER_3_5, VER_MAJOR_MINOR -# 'keyword' interferes with ast.keyword -import keyword as kwmod - -from xonsh.ply.ply.lex import LexToken - -from xonsh.lazyasd import lazyobject -from xonsh.platform import PYTHON_VERSION_INFO -from xonsh.tokenize import ( - OP, - IOREDIRECT, - STRING, - DOLLARNAME, - NUMBER, - SEARCHPATH, - NEWLINE, - INDENT, - DEDENT, - NL, - COMMENT, - ENCODING, - ENDMARKER, - NAME, - ERRORTOKEN, - GREATER, - LESS, - RIGHTSHIFT, - tokenize, - TokenError, -) - - -@lazyobject -def token_map(): - """Mapping from ``tokenize`` tokens (or token types) to PLY token types. If - a simple one-to-one mapping from ``tokenize`` to PLY exists, the lexer will - look it up here and generate a single PLY token of the given type. - Otherwise, it will fall back to handling that token using one of the - handlers in``special_handlers``. 
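The ``JSON_FORMAT`` header above is what makes lazy loading possible: the four location fields are padded to a fixed ten characters each, so a reader can ``seek(9)`` and ``read(48)`` to recover them without parsing the rest of the file. A round-trip sketch (the offset arithmetic here is illustrative, not the real writer's)::

    import io
    import json

    JSON_FORMAT = ('{{"locs": [{iloc:>10}, {ilen:>10}, {dloc:>10}, {dlen:>10}],\n'
                   ' "index": {index},\n'
                   ' "data": {data}\n'
                   '}}\n')

    index = json.dumps({"offsets": 0, "sizes": 7})
    data = json.dumps({"a": 1})
    s = JSON_FORMAT.format(iloc=69, ilen=len(index),
                           dloc=69 + len(index) + 11, dlen=len(data),
                           index=index, data=data)

    f = io.StringIO(s)
    f.seek(9)                            # lands on the '[' of "locs"
    iloc, ilen, dloc, dlen = json.loads(f.read(48))
    assert (iloc, ilen) == (69, len(index))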
- """ - tm = {} - # operators - _op_map = { - # punctuation - ",": "COMMA", - ".": "PERIOD", - ";": "SEMI", - ":": "COLON", - "...": "ELLIPSIS", - # basic operators - "+": "PLUS", - "-": "MINUS", - "*": "TIMES", - "@": "AT", - "/": "DIVIDE", - "//": "DOUBLEDIV", - "%": "MOD", - "**": "POW", - "|": "PIPE", - "~": "TILDE", - "^": "XOR", - "<<": "LSHIFT", - ">>": "RSHIFT", - "<": "LT", - "<=": "LE", - ">": "GT", - ">=": "GE", - "==": "EQ", - "!=": "NE", - "->": "RARROW", - # assignment operators - "=": "EQUALS", - "+=": "PLUSEQUAL", - "-=": "MINUSEQUAL", - "*=": "TIMESEQUAL", - "@=": "ATEQUAL", - "/=": "DIVEQUAL", - "%=": "MODEQUAL", - "**=": "POWEQUAL", - "<<=": "LSHIFTEQUAL", - ">>=": "RSHIFTEQUAL", - "&=": "AMPERSANDEQUAL", - "^=": "XOREQUAL", - "|=": "PIPEEQUAL", - "//=": "DOUBLEDIVEQUAL", - # extra xonsh operators - "?": "QUESTION", - "??": "DOUBLE_QUESTION", - "@$": "ATDOLLAR", - "&": "AMPERSAND", - } - for (op, typ) in _op_map.items(): - tm[(OP, op)] = typ - tm[IOREDIRECT] = "IOREDIRECT" - tm[STRING] = "STRING" - tm[DOLLARNAME] = "DOLLAR_NAME" - tm[NUMBER] = "NUMBER" - tm[SEARCHPATH] = "SEARCHPATH" - tm[NEWLINE] = "NEWLINE" - tm[INDENT] = "INDENT" - tm[DEDENT] = "DEDENT" - if (3, 5, 0) <= PYTHON_VERSION_INFO < (3, 7, 0): - from xonsh.tokenize import ASYNC, AWAIT - - tm[ASYNC] = "ASYNC" - tm[AWAIT] = "AWAIT" - return tm - - -NEED_WHITESPACE = frozenset(["and", "or"]) - - -@lazyobject -def RE_NEED_WHITESPACE(): - pattern = r"\s?(" + "|".join(NEED_WHITESPACE) + r")(\s|[\\]$)" - return re.compile(pattern) - - -def handle_name(state, token): - """Function for handling name tokens""" - typ = "NAME" - state["last"] = token - needs_whitespace = token.string in NEED_WHITESPACE - has_whitespace = needs_whitespace and RE_NEED_WHITESPACE.match( - token.line[max(0, token.start[1] - 1) :] - ) - if state["pymode"][-1][0]: - if needs_whitespace and not has_whitespace: - pass - elif token.string in kwmod.kwlist: +future_kwlist = [] + +token_map = {} +""" +Mapping from ``tokenize`` tokens (or token types) to PLY token types. If a +simple one-to-one mapping from ``tokenize`` to PLY exists, the lexer will look +it up here and generate a single PLY token of the given type. Otherwise, it +will fall back to handling that token using one of the handlers in +``special_handlers``. 
+""" + +# operators +_op_map = { + # punctuation + ',': 'COMMA', '.': 'PERIOD', ';': 'SEMI', ':': 'COLON', + '...': 'ELLIPSIS', + # basic operators + '+': 'PLUS', '-': 'MINUS', '*': 'TIMES', '@': 'AT', '/': 'DIVIDE', + '//': 'DOUBLEDIV', '%': 'MOD', '**': 'POW', '|': 'PIPE', + '~': 'TILDE', '^': 'XOR', '<<': 'LSHIFT', '>>': 'RSHIFT', + '<': 'LT', '<=': 'LE', '>': 'GT', '>=': 'GE', '==': 'EQ', + '!=': 'NE', '->': 'RARROW', + # assignment operators + '=': 'EQUALS', '+=': 'PLUSEQUAL', '-=': 'MINUSEQUAL', + '*=': 'TIMESEQUAL', '@=': 'ATEQUAL', '/=': 'DIVEQUAL', '%=': 'MODEQUAL', + '**=': 'POWEQUAL', '<<=': 'LSHIFTEQUAL', '>>=': 'RSHIFTEQUAL', + '&=': 'AMPERSANDEQUAL', '^=': 'XOREQUAL', '|=': 'PIPEEQUAL', + '//=': 'DOUBLEDIVEQUAL', +} +for (op, type) in _op_map.items(): + token_map[(tokenize.OP, op)] = type + +token_map[tokenize.STRING] = 'STRING' +token_map[tokenize.NEWLINE] = 'NEWLINE' +token_map[tokenize.INDENT] = 'INDENT' +token_map[tokenize.DEDENT] = 'DEDENT' +if VER_3_5 <= VER_MAJOR_MINOR: + token_map[tokenize.ASYNC] = 'ASYNC' + token_map[tokenize.AWAIT] = 'AWAIT' +else: + future_kwlist += ['async', 'await'] + +_REDIRECT_NAMES = frozenset({'out', 'err', 'all', 'o', 'e', 'a'}) + + +def handle_name(state, token, stream): + """ + Function for handling name tokens + """ + typ = 'NAME' + state['last'] = token + if state['pymode'][-1][0]: + if token.string in kwlist: typ = token.string.upper() yield _new_token(typ, token.string, token.start) else: - if has_whitespace and token.string == "and": - yield _new_token("AND", token.string, token.start) - elif has_whitespace and token.string == "or": - yield _new_token("OR", token.string, token.start) + # subprocess mode + n = next(stream, None) + string = token.string + if (n is not None and n.string in {'<', '>', '>>'} and + n.start == token.end and token.string in _REDIRECT_NAMES): + # looks like a redirect to me! 
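The redirect detection in ``handle_name`` above hinges on token adjacency: ``err>`` becomes one ``IOREDIRECT`` token only when the name token's end coordinate equals the operator token's start. That test in isolation (coordinates are ``(row, col)`` pairs, as in ``tokenize``)::

    _REDIRECT_NAMES = frozenset({'out', 'err', 'all', 'o', 'e', 'a'})

    def looks_like_redirect(name, name_end, op, op_start):
        # The two token spans must touch for 'err>' to fuse.
        return (name in _REDIRECT_NAMES and
                op in {'<', '>', '>>'} and
                op_start == name_end)

    assert looks_like_redirect('err', (1, 8), '>', (1, 8))
    assert not looks_like_redirect('err', (1, 8), '>', (1, 9))  # 'err >'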
+ e = n.end + string += n.string + n2 = next(stream, None) + if n2 is not None and n2.string == '&' and n2.start == n.end: + string += n2.string + e = n2.end + n2 = next(stream, None) + if n2 is not None: + if (n2.start == e and + (n2.type == tokenize.NUMBER or + (n2.type == tokenize.NAME and + n2.string in _REDIRECT_NAMES))): + string += n2.string + state['last'] = n2 + yield _new_token('IOREDIRECT', string, token.start) + else: + state['last'] = n + yield _new_token('IOREDIRECT', string, token.start) + yield from handle_token(state, n2, stream) + else: + state['last'] = n + yield _new_token('IOREDIRECT', string, token.start) + else: + yield _new_token('NAME', token.string, token.start) + if n is not None: + yield from handle_token(state, n, stream) + + +def _make_special_handler(token_type, extra_check=lambda x: True): + def inner_handler(state, token, stream): + state['last'] = token + if state['pymode'][-1][0]: + yield _new_token(token_type, token.string, token.start) else: - yield _new_token("NAME", token.string, token.start) + # subprocess mode + n = next(stream, None) + string = token.string + if (n is not None and + n.string in {'<', '>', '>>'} and + n.start == token.end): + e = n.end + string += n.string + n2 = next(stream, None) + if n2 is not None and n2.string == '&' and n2.start == n.end: + state['last'] = n2 + string += n2.string + e = n2.end + n2 = next(stream, None) + if n2 is not None: + if (n2.start == e and + (n2.type == tokenize.NUMBER or + (n2.type == tokenize.NAME and + n2.string in _REDIRECT_NAMES))): + string += n2.string + state['last'] = n2 + yield _new_token('IOREDIRECT', string, token.start) + else: + state['last'] = n + yield _new_token('IOREDIRECT', string, token.start) + yield from handle_token(state, n2, stream) + else: + state['last'] = n + yield _new_token('IOREDIRECT', string, token.start) + else: + yield _new_token(token_type, token.string, token.start) + if n is not None: + yield from handle_token(state, n, stream) + return inner_handler + + +handle_number = _make_special_handler('NUMBER') +"""Function for handling number tokens""" + +handle_ampersand = _make_special_handler('AMPERSAND') +"""Function for handling ampersand tokens""" + + +def handle_dollar(state, token, stream): + """ + Function for generating PLY tokens associated with ``$``. + """ + n = next(stream, None) + + if n is None: + m = "missing token after $" + yield _new_token("ERRORTOKEN", m, token.start) + elif n.start != token.end: + m = "unexpected whitespace after $" + yield _new_token("ERRORTOKEN", m, token.start) + elif n.type == tokenize.NAME: + state['last'] = n + yield _new_token('DOLLAR_NAME', '$' + n.string, token.start) + elif n.type == tokenize.OP and n.string == '(': + state['pymode'].append((False, '$(', ')', token.start)) + state['last'] = n + yield _new_token('DOLLAR_LPAREN', '$(', token.start) + elif n.type == tokenize.OP and n.string == '[': + state['pymode'].append((False, '$[', ']', token.start)) + state['last'] = n + yield _new_token('DOLLAR_LBRACKET', '$[', token.start) + elif n.type == tokenize.OP and n.string == '{': + state['pymode'].append((True, '${', '}', token.start)) + state['last'] = n + yield _new_token('DOLLAR_LBRACE', '${', token.start) + else: + e = 'expected NAME, (, [, or {{ after $, but got {0}' + m = e.format(n) + yield _new_token("ERRORTOKEN", m, token.start) + + +def handle_at(state, token, stream): + """ + Function for generating PLY tokens associated with ``@``. 
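``handle_dollar`` above branches purely on which token follows ``$`` and on whether it touches the ``$``. The dispatch as a tiny function (token objects reduced to a string plus an adjacency flag)::

    DOLLAR_OPENERS = {'(': 'DOLLAR_LPAREN', '[': 'DOLLAR_LBRACKET',
                      '{': 'DOLLAR_LBRACE'}

    def classify_dollar(nxt, adjacent):
        # Whitespace after '$' is an error; otherwise the next token
        # picks the mode: $(...), $[...], ${...}, or $NAME.
        if not adjacent:
            return 'ERRORTOKEN'
        if nxt in DOLLAR_OPENERS:
            return DOLLAR_OPENERS[nxt]
        if nxt.isidentifier():
            return 'DOLLAR_NAME'
        return 'ERRORTOKEN'

    assert classify_dollar('(', True) == 'DOLLAR_LPAREN'
    assert classify_dollar('HOME', True) == 'DOLLAR_NAME'
    assert classify_dollar('HOME', False) == 'ERRORTOKEN'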
+ """ + n = next(stream, None) + + if n is None: + state['last'] = token + m = "missing token after @" + yield _new_token("ERRORTOKEN", m, token.start) + elif n.type == tokenize.OP and n.string == '(' and \ + n.start == token.end: + state['pymode'].append((True, '@(', ')', token.start)) + state['last'] = n + yield _new_token('AT_LPAREN', '@(', token.start) + else: + state['last'] = token + yield _new_token('AT', '@', token.start) + yield from handle_token(state, n, stream) + + +def handle_question(state, token, stream): + """ + Function for generating PLY tokens for help and superhelp + """ + n = next(stream, None) + + if n is not None and n.type == tokenize.ERRORTOKEN and \ + n.string == '?' and n.start == token.end: + state['last'] = n + yield _new_token('DOUBLE_QUESTION', '??', token.start) + else: + state['last'] = token + yield _new_token('QUESTION', '?', token.start) + if n is not None: + yield from handle_token(state, n, stream) + + +def handle_backtick(state, token, stream): + """ + Function for generating PLY tokens representing regex globs. + """ + n = next(stream, None) + + found_match = False + sofar = '`' + while n is not None: + sofar += n.string + if n.type == tokenize.ERRORTOKEN and n.string == '`': + found_match = True + break + elif n.type == tokenize.NEWLINE or n.type == tokenize.NL: + break + n = next(stream, None) + if found_match: + state['last'] = n + yield _new_token('REGEXPATH', sofar, token.start) + else: + state['last'] = token + e = "Could not find matching backtick for regex on line {0}" + m = e.format(token.start[0]) + yield _new_token("ERRORTOKEN", m, token.start) + + +def handle_lparen(state, token, stream): + """ + Function for handling ``(`` + """ + state['pymode'].append((True, '(', ')', token.start)) + state['last'] = token + yield _new_token('LPAREN', '(', token.start) + + +def handle_lbrace(state, token, stream): + """ + Function for handling ``{`` + """ + state['pymode'].append((True, '{', '}', token.start)) + state['last'] = token + yield _new_token('LBRACE', '{', token.start) + + +def handle_lbracket(state, token, stream): + """ + Function for handling ``[`` + """ + state['pymode'].append((True, '[', ']', token.start)) + state['last'] = token + yield _new_token('LBRACKET', '[', token.start) def _end_delimiter(state, token): - py = state["pymode"] + py = state['pymode'] s = token.string l, c = token.start if len(py) > 1: @@ -159,171 +289,101 @@ def _end_delimiter(state, token): return 'Unmatched "{}" at line {}, column {}'.format(s, l, c) -def handle_rparen(state, token): +def handle_rparen(state, token, stream): """ Function for handling ``)`` """ e = _end_delimiter(state, token) if e is None: - state["last"] = token - yield _new_token("RPAREN", ")", token.start) + state['last'] = token + yield _new_token('RPAREN', ')', token.start) else: - yield _new_token("ERRORTOKEN", e, token.start) + yield _new_token('ERRORTOKEN', e, token.start) -def handle_rbrace(state, token): - """Function for handling ``}``""" +def handle_rbrace(state, token, stream): + """ + Function for handling ``}`` + """ e = _end_delimiter(state, token) if e is None: - state["last"] = token - yield _new_token("RBRACE", "}", token.start) + state['last'] = token + yield _new_token('RBRACE', '}', token.start) else: - yield _new_token("ERRORTOKEN", e, token.start) + yield _new_token('ERRORTOKEN', e, token.start) -def handle_rbracket(state, token): +def handle_rbracket(state, token, stream): """ Function for handling ``]`` """ e = _end_delimiter(state, token) if e is None: - state["last"] 
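``handle_backtick`` above accumulates raw token text until it sees the closing backtick, giving up at a newline. The scanning loop in miniature (the token stream is reduced to ``(type, string)`` pairs for illustration)::

    def find_regexpath(tokens):
        # Accumulate until the matching '`'; None means no match on
        # this line, which the lexer reports as an ERRORTOKEN.
        sofar = '`'
        for typ, string in tokens:
            sofar += string
            if string == '`':
                return sofar
            if typ == 'NEWLINE':
                break
        return None

    assert find_regexpath([('ERRORTOKEN', '.'), ('OP', '*'),
                           ('ERRORTOKEN', '`')]) == '`.*`'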
= token - yield _new_token("RBRACKET", "]", token.start) + state['last'] = token + yield _new_token('RBRACKET', ']', token.start) else: - yield _new_token("ERRORTOKEN", e, token.start) + yield _new_token('ERRORTOKEN', e, token.start) -def handle_error_space(state, token): +def handle_error_space(state, token, stream): """ Function for handling special whitespace characters in subprocess mode """ - if not state["pymode"][-1][0]: - state["last"] = token - yield _new_token("WS", token.string, token.start) + if not state['pymode'][-1][0]: + state['last'] = token + yield _new_token('WS', token.string, token.start) else: yield from [] -def handle_error_linecont(state, token): - """Function for handling special line continuations as whitespace - characters in subprocess mode. - """ - if state["pymode"][-1][0]: - return - prev = state["last"] - if prev.end != token.start: - return # previous token is separated by whitespace - state["last"] = token - yield _new_token("WS", "\\", token.start) - - -def handle_error_token(state, token): +def handle_error_token(state, token, stream): """ Function for handling error tokens """ - state["last"] = token - if token.string == "!": - typ = "BANG" - elif not state["pymode"][-1][0]: - typ = "NAME" + state['last'] = token + if not state['pymode'][-1][0]: + typ = 'NAME' else: - typ = "ERRORTOKEN" + typ = 'ERRORTOKEN' yield _new_token(typ, token.string, token.start) -def handle_ignore(state, token): - """Function for handling tokens that should be ignored""" +def handle_ignore(state, token, stream): + """ + Function for handling tokens that should be ignored + """ yield from [] -def handle_double_amps(state, token): - yield _new_token("AND", "and", token.start) - - -def handle_double_pipe(state, token): - yield _new_token("OR", "or", token.start) +special_handlers = { + tokenize.NL: handle_ignore, + tokenize.COMMENT: handle_ignore, + tokenize.ENCODING: handle_ignore, + tokenize.ENDMARKER: handle_ignore, + tokenize.NAME: handle_name, + tokenize.NUMBER: handle_number, + tokenize.ERRORTOKEN: handle_error_token, + (tokenize.OP, '&'): handle_ampersand, + (tokenize.OP, '@'): handle_at, + (tokenize.OP, '('): handle_lparen, + (tokenize.OP, ')'): handle_rparen, + (tokenize.OP, '{'): handle_lbrace, + (tokenize.OP, '}'): handle_rbrace, + (tokenize.OP, '['): handle_lbracket, + (tokenize.OP, ']'): handle_rbracket, + (tokenize.ERRORTOKEN, '$'): handle_dollar, + (tokenize.ERRORTOKEN, '`'): handle_backtick, + (tokenize.ERRORTOKEN, '?'): handle_question, + (tokenize.ERRORTOKEN, ' '): handle_error_space, +} +""" +Mapping from ``tokenize`` tokens (or token types) to the proper function for +generating PLY tokens from them. In addition to yielding PLY tokens, these +functions may manipulate the Lexer's state. +""" -def handle_redirect(state, token): - # The parser expects whitespace after a redirection in subproc mode. - # If whitespace does not exist, we'll issue an empty whitespace - # token before proceeding. 
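``_end_delimiter`` above pops the ``pymode`` stack and reports a mismatch when a closer does not pair with the most recent opener. The same check over a bare stack, stripped of the lexer state::

    def check_delims(source):
        # Each opener pushes its expected closer; each closer must
        # match the top of the stack.
        stack = []
        for ch in source:
            if ch in '([{':
                stack.append({'(': ')', '[': ']', '{': '}'}[ch])
            elif ch in ')]}':
                if not stack or stack.pop() != ch:
                    return 'Unmatched "{}"'.format(ch)
        return None

    assert check_delims('([])') is None
    assert check_delims('(]') == 'Unmatched "]"'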
- state["last"] = token - typ = token.type - st = token.string - key = (typ, st) if (typ, st) in token_map else typ - yield _new_token(token_map[key], st, token.start) - if state["pymode"][-1][0]: - return - # add a whitespace token after a redirection, if we need to - next_tok = next(state["stream"]) - if next_tok.start == token.end: - yield _new_token("WS", "", token.end) - yield from handle_token(state, next_tok) - - -def _make_matcher_handler(tok, typ, pymode, ender, handlers): - matcher = ( - ")" - if tok.endswith("(") - else "}" - if tok.endswith("{") - else "]" - if tok.endswith("[") - else None - ) - - def _inner_handler(state, token): - state["pymode"].append((pymode, tok, matcher, token.start)) - state["last"] = token - yield _new_token(typ, tok, token.start) - - handlers[(OP, tok)] = _inner_handler - - -@lazyobject -def special_handlers(): - """Mapping from ``tokenize`` tokens (or token types) to the proper - function for generating PLY tokens from them. In addition to - yielding PLY tokens, these functions may manipulate the Lexer's state. - """ - sh = { - NL: handle_ignore, - COMMENT: handle_ignore, - ENCODING: handle_ignore, - ENDMARKER: handle_ignore, - NAME: handle_name, - ERRORTOKEN: handle_error_token, - LESS: handle_redirect, - GREATER: handle_redirect, - RIGHTSHIFT: handle_redirect, - IOREDIRECT: handle_redirect, - (OP, "<"): handle_redirect, - (OP, ">"): handle_redirect, - (OP, ">>"): handle_redirect, - (OP, ")"): handle_rparen, - (OP, "}"): handle_rbrace, - (OP, "]"): handle_rbracket, - (OP, "&&"): handle_double_amps, - (OP, "||"): handle_double_pipe, - (ERRORTOKEN, " "): handle_error_space, - (ERRORTOKEN, "\\\n"): handle_error_linecont, - (ERRORTOKEN, "\\\r\n"): handle_error_linecont, - } - _make_matcher_handler("(", "LPAREN", True, ")", sh) - _make_matcher_handler("[", "LBRACKET", True, "]", sh) - _make_matcher_handler("{", "LBRACE", True, "}", sh) - _make_matcher_handler("$(", "DOLLAR_LPAREN", False, ")", sh) - _make_matcher_handler("$[", "DOLLAR_LBRACKET", False, "]", sh) - _make_matcher_handler("${", "DOLLAR_LBRACE", True, "}", sh) - _make_matcher_handler("!(", "BANG_LPAREN", False, ")", sh) - _make_matcher_handler("![", "BANG_LBRACKET", False, "]", sh) - _make_matcher_handler("@(", "AT_LPAREN", True, ")", sh) - _make_matcher_handler("@$(", "ATDOLLAR_LPAREN", False, ")", sh) - return sh - - -def handle_token(state, token): +def handle_token(state, token, stream): """ General-purpose token handler. Makes use of ``token_map`` or ``special_map`` to yield one or more PLY tokens from the given input. @@ -334,28 +394,30 @@ def handle_token(state, token): state : The current state of the lexer, including information about whether we are in Python mode or subprocess mode, which changes the lexer's - behavior. Also includes the stream of tokens yet to be considered. 
+ behavior token : The token (from ``tokenize``) currently under consideration + stream : + A generator from which more tokens can be grabbed if necessary """ typ = token.type st = token.string - pymode = state["pymode"][-1][0] + pymode = state['pymode'][-1][0] if not pymode: - if state["last"] is not None and state["last"].end != token.start: + if state['last'] is not None and state['last'].end != token.start: cur = token.start - old = state["last"].end + old = state['last'].end if cur[0] == old[0] and cur[1] > old[1]: - yield _new_token("WS", token.line[old[1] : cur[1]], old) + yield _new_token('WS', token.line[old[1]:cur[1]], old) if (typ, st) in special_handlers: - yield from special_handlers[(typ, st)](state, token) + yield from special_handlers[(typ, st)](state, token, stream) elif (typ, st) in token_map: - state["last"] = token + state['last'] = token yield _new_token(token_map[(typ, st)], st, token.start) elif typ in special_handlers: - yield from special_handlers[typ](state, token) + yield from special_handlers[typ](state, token, stream) elif typ in token_map: - state["last"] = token + state['last'] = token yield _new_token(token_map[typ], st, token.start) else: m = "Unexpected token: {0}".format(token) @@ -367,31 +429,27 @@ def get_tokens(s): Given a string containing xonsh code, generates a stream of relevant PLY tokens using ``handle_token``. """ - state = { - "indents": [0], - "last": None, - "pymode": [(True, "", "", (0, 0))], - "stream": tokenize(io.BytesIO(s.encode("utf-8")).readline), - } + tokstream = tokenize.tokenize(BytesIO(s.encode('utf-8')).readline) + state = {'indents': [0], 'pymode': [(True, '', '', (0, 0))], 'last': None} while True: try: - token = next(state["stream"]) - yield from handle_token(state, token) + token = next(tokstream) + yield from handle_token(state, token, tokstream) except StopIteration: - if len(state["pymode"]) > 1: - pm, o, m, p = state["pymode"][-1] + if len(state['pymode']) > 1: + pm, o, m, p = state['pymode'][-1] l, c = p e = 'Unmatched "{}" at line {}, column {}' - yield _new_token("ERRORTOKEN", e.format(o, l, c), (0, 0)) + yield _new_token('ERRORTOKEN', e.format(o, l, c), (0, 0)) break - except TokenError as e: + except tokenize.TokenError as e: # this is recoverable in single-line mode (from the shell) # (e.g., EOF while scanning string literal) - yield _new_token("ERRORTOKEN", e.args[0], (0, 0)) + yield _new_token('ERRORTOKEN', e.args[0], (0, 0)) break except IndentationError as e: # this is never recoverable - yield _new_token("ERRORTOKEN", e, (0, 0)) + yield _new_token('ERRORTOKEN', e, (0, 0)) break @@ -407,8 +465,6 @@ def _new_token(type, value, pos): class Lexer(object): """Implements a lexer for the xonsh language.""" - _tokens = None - def __init__(self): """ Attributes @@ -421,9 +477,8 @@ def __init__(self): The last line number seen. """ - self.fname = "" + self.fname = '' self.last = None - self.beforelast = None def build(self, **kwargs): """Part of the PLY lexer API.""" @@ -438,7 +493,6 @@ def input(self, s): def token(self): """Retrieves the next token.""" - self.beforelast = self.last self.last = next(self.token_stream, None) return self.last @@ -448,61 +502,25 @@ def __iter__(self): yield t t = self.token() - def split(self, s): - """Splits a string into a list of strings which are whitespace-separated - tokens. 
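``get_tokens`` drives everything from the standard-library tokenizer: source bytes in, a stream of ``TokenInfo`` tuples out, which the handlers then rewrite into PLY tokens. Inspecting that raw stream directly shows what the handlers receive::

    import io
    import tokenize

    def raw_tokens(source):
        readline = io.BytesIO(source.encode("utf-8")).readline
        for tok in tokenize.tokenize(readline):
            yield tokenize.tok_name[tok.type], tok.string

    for name, text in raw_tokens("x = 1"):
        print(name, repr(text))
    # ENCODING, NAME, OP, NUMBER, NEWLINE, ENDMARKER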
- """ - vals = [] - self.input(s) - l = c = -1 - ws = "WS" - nl = "\n" - for t in self: - if t.type == ws: - continue - elif l < t.lineno: - vals.append(t.value) - elif len(vals) > 0 and c == t.lexpos: - vals[-1] = vals[-1] + t.value - else: - vals.append(t.value) - nnl = t.value.count(nl) - if nnl == 0: - l = t.lineno - c = t.lexpos + len(t.value) - else: - l = t.lineno + nnl - c = len(t.value.rpartition(nl)[-1]) - return vals - # # All the tokens recognized by the lexer # - @property - def tokens(self): - if self._tokens is None: - t = ( - tuple(token_map.values()) - + ( - "NAME", # name tokens - "BANG", # ! tokens - "WS", # whitespace in subprocess mode - "LPAREN", - "RPAREN", # ( ) - "LBRACKET", - "RBRACKET", # [ ] - "LBRACE", - "RBRACE", # { } - "AT_LPAREN", # @( - "BANG_LPAREN", # !( - "BANG_LBRACKET", # ![ - "DOLLAR_LPAREN", # $( - "DOLLAR_LBRACE", # ${ - "DOLLAR_LBRACKET", # $[ - "ATDOLLAR_LPAREN", # @$( - "ERRORTOKEN", # whoops! - ) - + tuple(i.upper() for i in kwmod.kwlist) - ) - self._tokens = t - return self._tokens + tokens = tuple(token_map.values()) + ( + 'NAME', # name tokens + 'NUMBER', # numbers + 'WS', # whitespace in subprocess mode + 'AMPERSAND', # & + 'REGEXPATH', # regex escaped with backticks + 'IOREDIRECT', # subprocess io redirection token + 'LPAREN', 'RPAREN', # ( ) + 'LBRACKET', 'RBRACKET', # [ ] + 'LBRACE', 'RBRACE', # { } + 'AT', # @ + 'QUESTION', # ? + 'DOUBLE_QUESTION', # ?? + 'AT_LPAREN', # @( + 'DOLLAR_NAME', # $NAME + 'DOLLAR_LPAREN', # $( + 'DOLLAR_LBRACE', # ${ + 'DOLLAR_LBRACKET', # $[ + ) + tuple(i.upper() for i in kwlist) + tuple(i.upper() for i in future_kwlist) diff --git a/xonsh/macutils.py b/xonsh/macutils.py deleted file mode 100644 index 3467c20..0000000 --- a/xonsh/macutils.py +++ /dev/null @@ -1,22 +0,0 @@ -"""Provides some Mac / Darwin based utility functions for xonsh.""" -from ctypes import c_uint, byref, create_string_buffer - -from xonsh.platform import LIBC - - -def sysctlbyname(name, return_str=True): - """Gets a sysctl value by name. If return_str is true, this will return - a string representation, else it will return the raw value. 
- """ - # forked from https://gist.github.com/pudquick/581a71425439f2cf8f09 - size = c_uint(0) - # Find out how big our buffer will be - LIBC.sysctlbyname(name, None, byref(size), None, 0) - # Make the buffer - buf = create_string_buffer(size.value) - # Re-run, but provide the buffer - LIBC.sysctlbyname(name, buf, byref(size), None, 0) - if return_str: - return buf.value - else: - return buf.raw diff --git a/xonsh/main.py b/xonsh/main.py index c4c0ca5..0d7103a 100644 --- a/xonsh/main.py +++ b/xonsh/main.py @@ -1,527 +1,112 @@ -# -*- coding: utf-8 -*- """The main xonsh script.""" import os import sys -import enum -import argparse -import builtins -import contextlib +import shlex import signal -import traceback +import builtins +import subprocess +from argparse import ArgumentParser, Namespace -from xonsh import __version__ -from xonsh.timings import setup_timings -from xonsh.lazyasd import lazyobject -from xonsh.shell import Shell -from xonsh.pretty import pretty -from xonsh.execer import Execer -from xonsh.proc import HiddenCommandPipeline -from xonsh.jobs import ignore_sigtstp -from xonsh.tools import setup_win_unicode_console, print_color, to_bool_or_int -from xonsh.platform import HAS_PYGMENTS, ON_WINDOWS -from xonsh.codecache import run_script_with_cache, run_code_with_cache -from xonsh.xonfig import print_welcome_screen -from xonsh.xontribs import xontribs_load -from xonsh.lazyimps import pygments, pyghooks -from xonsh.imphooks import install_import_hooks -from xonsh.events import events -from xonsh.environ import xonshrc_context, make_args_env -from xonsh.built_ins import XonshSession, load_builtins, load_proxies +import click from gitsome import __version__ as gitsome_version - -events.transmogrify("on_post_init", "LoadEvent") -events.doc( - "on_post_init", - """ -on_post_init() -> None - -Fired after all initialization is finished and we're ready to do work. - -NOTE: This is fired before the wizard is automatically started. -""", -) - -events.transmogrify("on_exit", "LoadEvent") -events.doc( - "on_exit", - """ -on_exit() -> None - -Fired after all commands have been executed, before tear-down occurs. - -NOTE: All the caveats of the ``atexit`` module also apply to this event. -""", -) - - -events.transmogrify("on_pre_cmdloop", "LoadEvent") -events.doc( - "on_pre_cmdloop", - """ -on_pre_cmdloop() -> None - -Fired just before the command loop is started, if it is. -""", -) - -events.transmogrify("on_post_cmdloop", "LoadEvent") -events.doc( - "on_post_cmdloop", - """ -on_post_cmdloop() -> None - -Fired just after the command loop finishes, if it is. - -NOTE: All the caveats of the ``atexit`` module also apply to this event. -""", -) - -events.transmogrify("on_pre_rc", "LoadEvent") -events.doc( - "on_pre_rc", - """ -on_pre_rc() -> None - -Fired just before rc files are loaded, if they are. -""", -) - -events.transmogrify("on_post_rc", "LoadEvent") -events.doc( - "on_post_rc", - """ -on_post_rc() -> None - -Fired just after rc files are loaded, if they are. 
-""", -) - - -def get_setproctitle(): - """Proxy function for loading process title""" - try: - from setproctitle import setproctitle as spt - except ImportError: - return - return spt - - -def path_argument(s): - """Return a path only if the path is actually legal - - This is very similar to argparse.FileType, except that it doesn't return - an open file handle, but rather simply validates the path.""" - - s = os.path.abspath(os.path.expanduser(s)) - if not os.path.isfile(s): - msg = "{0!r} must be a valid path to a file".format(s) - raise argparse.ArgumentTypeError(msg) - return s +from xonsh import __version__ +from xonsh.shell import Shell +from xonsh.pretty import pprint +from xonsh.jobs import ignore_sigtstp -@lazyobject -def parser(): - p = argparse.ArgumentParser(description="xonsh", add_help=False) - p.add_argument( - "-h", - "--help", - dest="help", - action="store_true", - default=False, - help="show help and exit", - ) - p.add_argument( - "-V", - "--version", - dest="version", - action="store_true", - default=False, - help="show version information and exit", - ) - p.add_argument( - "-c", - help="Run a single command and exit", - dest="command", - required=False, - default=None, - ) - p.add_argument( - "-i", - "--interactive", - help="force running in interactive mode", - dest="force_interactive", - action="store_true", - default=False, - ) - p.add_argument( - "-l", - "--login", - help="run as a login shell", - dest="login", - action="store_true", - default=False, - ) - p.add_argument( - "--config-path", - help="DEPRECATED: static configuration files may now be used " - "in the XONSHRC file list, see the --rc option.", - dest="config_path", - default=None, - type=path_argument, - ) - p.add_argument( - "--rc", - help="The xonshrc files to load, these may be either xonsh " - "files or JSON-based static configuration files.", - dest="rc", - nargs="+", - type=path_argument, - default=None, - ) - p.add_argument( - "--no-rc", - help="Do not load the .xonshrc files", - dest="norc", - action="store_true", - default=False, - ) - p.add_argument( - "--no-script-cache", - help="Do not cache scripts as they are run", - dest="scriptcache", - action="store_false", - default=True, - ) - p.add_argument( - "--cache-everything", - help="Use a cache, even for interactive commands", - dest="cacheall", - action="store_true", - default=False, - ) - p.add_argument( - "-D", - dest="defines", - help="define an environment variable, in the form of " - "-DNAME=VAL. May be used many times.", - metavar="ITEM", - action="append", - default=None, - ) - p.add_argument( - "--shell-type", - help="What kind of shell should be used. " - "Possible options: readline, prompt_toolkit, random. " - "Warning! If set this overrides $SHELL_TYPE variable.", - dest="shell_type", - choices=tuple(Shell.shell_type_aliases.keys()), - default=None, - ) - p.add_argument( - "--timings", - help="Prints timing information before the prompt is shown. 
" - "This is useful while tracking down performance issues " - "and investigating startup times.", - dest="timings", - action="store_true", - default=None, - ) - p.add_argument( - "file", - metavar="script-file", - help="If present, execute the script in script-file" " and exit", - nargs="?", - default=None, - ) - p.add_argument( - "args", - metavar="args", - help="Additional arguments to the script specified " "by script-file", - nargs=argparse.REMAINDER, - default=[], - ) - return p +parser = ArgumentParser(description='xonsh') +parser.add_argument('-V', '--version', + action='version', + version='/'.join(('gitsome', gitsome_version)), + help='show version information and exit') +parser.add_argument('-c', + help="Run a single command and exit", + dest='command', + required=False, + default=None) +parser.add_argument('--no-rc', + help="Do not load the .xonshrc file", + dest='norc', + action='store_true', + default=False) +parser.add_argument('-D', + dest='defines', + help='define an environment variable, in the form of ' + '-DNAME=VAL. May be used many times.', + metavar='ITEM', + nargs='*', + default=None) +parser.add_argument('--shell-type', + help='What kind of shell should be used. ' + 'Possible options: readline, prompt_toolkit. ' + 'Warning! If set this overrides $SHELL_TYPE variable.', + dest='shell_type', + choices=('readline', 'prompt_toolkit'), + default=None) +parser.add_argument('file', + metavar='script-file', + help='If present, execute the script in script-file' + ' and exit', + nargs='?', + default=None) +parser.add_argument('args', + metavar='args', + help='Additional arguments to the script specified' + ' by script-file', + nargs='*', + default=[]) def _pprint_displayhook(value): - if value is None: - return - builtins._ = None # Set '_' to None to avoid recursion - if isinstance(value, HiddenCommandPipeline): + if value is not None: builtins._ = value - return - env = builtins.__xonsh__.env - if env.get("PRETTY_PRINT_RESULTS"): - printed_val = pretty(value) - else: - printed_val = repr(value) - if HAS_PYGMENTS and env.get("COLOR_RESULTS"): - tokens = list(pygments.lex(printed_val, lexer=pyghooks.XonshLexer())) - end = "" if env.get("SHELL_TYPE") == "prompt_toolkit2" else "\n" - print_color(tokens, end=end) - else: - print(printed_val) # black & white case - builtins._ = value - - -class XonshMode(enum.Enum): - single_command = 0 - script_from_file = 1 - script_from_stdin = 2 - interactive = 3 - - -def start_services(shell_kwargs, args): - """Starts up the essential services in the proper order. - This returns the environment instance as a convenience. 
- """ - install_import_hooks() - # create execer, which loads builtins - ctx = shell_kwargs.get("ctx", {}) - debug = to_bool_or_int(os.getenv("XONSH_DEBUG", "0")) - events.on_timingprobe.fire(name="pre_execer_init") - execer = Execer( - xonsh_ctx=ctx, - debug_level=debug, - scriptcache=shell_kwargs.get("scriptcache", True), - cacheall=shell_kwargs.get("cacheall", False), - ) - events.on_timingprobe.fire(name="post_execer_init") - # load rc files - login = shell_kwargs.get("login", True) - env = builtins.__xonsh__.env - rc = shell_kwargs.get("rc", None) - rc = env.get("XONSHRC") if rc is None else rc - if args.mode != XonshMode.interactive and not args.force_interactive: - # Don't load xonshrc if not interactive shell - rc = None - events.on_pre_rc.fire() - xonshrc_context(rcfiles=rc, execer=execer, ctx=ctx, env=env, login=login) - events.on_post_rc.fire() - # create shell - builtins.__xonsh__.shell = Shell(execer=execer, **shell_kwargs) - ctx["__name__"] = "__main__" - return env + pprint(value) -def premain(argv=None): - """Setup for main xonsh entry point. Returns parsed arguments.""" - if argv is None: - argv = sys.argv[1:] - builtins.__xonsh__ = XonshSession() - setup_timings(argv) - setproctitle = get_setproctitle() - if setproctitle is not None: - setproctitle(" ".join(["xonsh"] + argv)) - args = parser.parse_args(argv) - if args.help: - parser.print_help() - parser.exit() - if args.version: - version = "/".join(("xonsh", __version__)) - print(version) - parser.exit() - shell_kwargs = { - "shell_type": args.shell_type, - "completer": False, - "login": False, - "scriptcache": args.scriptcache, - "cacheall": args.cacheall, - "ctx": builtins.__xonsh__.ctx, - } - if args.login: - shell_kwargs["login"] = True +def main(argv=None): + """Main entry point for xonsh cli.""" + args = parser.parse_args() + shell_kwargs = {'shell_type': args.shell_type} if args.norc: - shell_kwargs["rc"] = () - elif args.rc: - shell_kwargs["rc"] = args.rc - setattr(sys, "displayhook", _pprint_displayhook) - print('Gitsome Version: ' + gitsome_version) + shell_kwargs['ctx'] = {} + setattr(sys, 'displayhook', _pprint_displayhook) + shell = builtins.__xonsh_shell__ = Shell(**shell_kwargs) + from xonsh import imphooks + env = builtins.__xonsh_env__ + if args.defines is not None: + env.update([x.split('=', 1) for x in args.defines]) + env['XONSH_INTERACTIVE'] = False + click.echo('Version: ' + gitsome_version) if args.command is not None: - args.mode = XonshMode.single_command - shell_kwargs["shell_type"] = "none" + # run a single command and exit + shell.default(args.command) elif args.file is not None: - args.mode = XonshMode.script_from_file - shell_kwargs["shell_type"] = "none" - elif not sys.stdin.isatty() and not args.force_interactive: - args.mode = XonshMode.script_from_stdin - shell_kwargs["shell_type"] = "none" - else: - args.mode = XonshMode.interactive - shell_kwargs["completer"] = True - shell_kwargs["login"] = True - env = start_services(shell_kwargs, args) - env["XONSH_LOGIN"] = shell_kwargs["login"] - if args.defines is not None: - env.update([x.split("=", 1) for x in args.defines]) - env["XONSH_INTERACTIVE"] = args.force_interactive or ( - args.mode == XonshMode.interactive - ) - if ON_WINDOWS: - setup_win_unicode_console(env.get("WIN_UNICODE_CONSOLE", True)) - return args - - -def _failback_to_other_shells(args, err): - # only failback for interactive shell; if we cannot tell, treat it - # as an interactive one for safe. 
- if hasattr(args, "mode") and args.mode != XonshMode.interactive: - raise err - foreign_shell = None - shells_file = "/etc/shells" - if not os.path.exists(shells_file): - # right now, it will always break here on Windows - raise err - excluded_list = ["xonsh", "screen"] - with open(shells_file) as f: - for line in f: - line = line.strip() - if not line or line.startswith("#"): - continue - if "/" not in line: - continue - _, shell = line.rsplit("/", 1) - if shell in excluded_list: - continue - if not os.path.exists(line): - continue - foreign_shell = line - break - if foreign_shell: - traceback.print_exc() - print("Xonsh encountered an issue during launch", file=sys.stderr) - print("Failback to {}".format(foreign_shell), file=sys.stderr) - os.execlp(foreign_shell, foreign_shell) + # run a script contained in a file + if os.path.isfile(args.file): + with open(args.file) as f: + code = f.read() + code = code if code.endswith('\n') else code + '\n' + env['ARGS'] = [args.file] + args.args + code = shell.execer.compile(code, mode='exec', glbs=shell.ctx) + shell.execer.exec(code, mode='exec', glbs=shell.ctx) + else: + print('xonsh: {0}: No such file or directory.'.format(args.file)) + elif not sys.stdin.isatty(): + # run a script given on stdin + code = sys.stdin.read() + code = code if code.endswith('\n') else code + '\n' + code = shell.execer.compile(code, mode='exec', glbs=shell.ctx) + shell.execer.exec(code, mode='exec', glbs=shell.ctx) else: - raise err - - -def main(argv=None): - args = None - try: - args = premain(argv) - return main_xonsh(args) - except Exception as err: - _failback_to_other_shells(args, err) - - -def main_xonsh(args): - """Main entry point for xonsh cli.""" - if not ON_WINDOWS: - - def func_sig_ttin_ttou(n, f): - pass - - signal.signal(signal.SIGTTIN, func_sig_ttin_ttou) - signal.signal(signal.SIGTTOU, func_sig_ttin_ttou) - - events.on_post_init.fire() - env = builtins.__xonsh__.env - shell = builtins.__xonsh__.shell - try: - if args.mode == XonshMode.interactive: - # enter the shell - env["XONSH_INTERACTIVE"] = True - ignore_sigtstp() - if env["XONSH_INTERACTIVE"] and not any( - os.path.isfile(i) for i in env["XONSHRC"] - ): - print_welcome_screen() - events.on_pre_cmdloop.fire() - try: - shell.shell.cmdloop() - finally: - events.on_post_cmdloop.fire() - elif args.mode == XonshMode.single_command: - # run a single command and exit - run_code_with_cache(args.command.lstrip(), shell.execer, mode="single") - elif args.mode == XonshMode.script_from_file: - # run a script contained in a file - path = os.path.abspath(os.path.expanduser(args.file)) - if os.path.isfile(path): - sys.argv = [args.file] + args.args - env.update(make_args_env()) # $ARGS is not sys.argv - env["XONSH_SOURCE"] = path - shell.ctx.update({"__file__": args.file, "__name__": "__main__"}) - run_script_with_cache( - args.file, shell.execer, glb=shell.ctx, loc=None, mode="exec" - ) - else: - print("xonsh: {0}: No such file or directory.".format(args.file)) - elif args.mode == XonshMode.script_from_stdin: - # run a script given on stdin - code = sys.stdin.read() - run_code_with_cache( - code, shell.execer, glb=shell.ctx, loc=None, mode="exec" - ) - finally: - events.on_exit.fire() - postmain(args) - - -def postmain(args=None): - """Teardown for main xonsh entry point, accepts parsed arguments.""" - if ON_WINDOWS: - setup_win_unicode_console(enable=False) - builtins.__xonsh__.shell = None - - -@contextlib.contextmanager -def main_context(argv=None): - """Generator that runs pre- and post-main() functions. 
This has two iterations. - The first yields the shell. The second returns None but cleans - up the shell. - """ - args = premain(argv) - yield builtins.__xonsh__.shell - postmain(args) - + -def setup( - ctx=None, - shell_type="none", - env=(("RAISE_SUBPROC_ERROR", True),), - aliases=(), - xontribs=(), - threadable_predictors=(), -): - """Starts up a new xonsh shell. Calling this function in another - package's __init__.py will allow xonsh to be fully used in the - package in headless or headed mode. This function is primarily intended to - make starting up xonsh for 3rd party packages easier. - Parameters - ---------- - ctx : dict-like or None, optional - The xonsh context to start with. If None, an empty dictionary - is provided. - shell_type : str, optional - The type of shell to start. By default this is 'none', indicating - we should start in headless mode. - env : dict-like, optional - Environment to update the current environment with after the shell - has been initialized. - aliases : dict-like, optional - Aliases to add after the shell has been initialized. - xontribs : iterable of str, optional - Xontrib names to load. - threadable_predictors : dict-like, optional - Threadable predictors to start up with. These override the defaults. - """ - ctx = {} if ctx is None else ctx - # setup xonsh ctx and execer - if not hasattr(builtins, "__xonsh__"): - execer = Execer(xonsh_ctx=ctx) - builtins.__xonsh__ = XonshSession(ctx=ctx, execer=execer) - load_builtins(ctx=ctx, execer=execer) - load_proxies() - builtins.__xonsh__.shell = Shell(execer, ctx=ctx, shell_type=shell_type) - builtins.__xonsh__.env.update(env) - install_import_hooks() - builtins.aliases.update(aliases) - if xontribs: - xontribs_load(xontribs) - tp = builtins.__xonsh__.commands_cache.threadable_predictors - tp.update(threadable_predictors) diff --git a/xonsh/openpy.py b/xonsh/openpy.py index 9f5162d..a07e8cc 100644 --- a/xonsh/openpy.py +++ b/xonsh/openpy.py @@ -1,7 +1,6 @@ -# -*- coding: utf-8 -*- -"""Tools to open ``*.py`` files as Unicode. - -Uses the encoding specified within the file, as per PEP 263. +""" +Tools to open ``*.py`` files as Unicode, using the encoding specified within the +file, as per PEP 263. Much of the code is taken from the tokenize module in Python 3.2. @@ -14,19 +13,125 @@ """ import io import re +import os.path +from io import TextIOWrapper, BytesIO + +from xonsh.tools import unicode_type + +cookie_re = re.compile(r"coding[:=]\s*([-\w.]+)", re.UNICODE) +cookie_comment_re = re.compile(r"^\s*#.*coding[:=]\s*([-\w.]+)", re.UNICODE) + +try: + # Available in Python 3 + from tokenize import detect_encoding +except ImportError: + from codecs import lookup, BOM_UTF8 + + # Copied from Python 3.2 tokenize + def _get_normal_name(orig_enc): + """Imitates get_normal_name in tokenizer.c.""" + # Only care about the first 12 characters. + enc = orig_enc[:12].lower().replace("_", "-") + if enc == "utf-8" or enc.startswith("utf-8-"): + return "utf-8" + if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or \ + enc.startswith(("latin-1-", "iso-8859-1-", "iso-latin-1-")): + return "iso-8859-1" + return orig_enc + + # Copied from Python 3.2 tokenize + def detect_encoding(readline): + """ + The detect_encoding() function is used to detect the encoding that + should be used to decode a Python source file. 
It requires one + argument, readline, in the same way as the tokenize() generator. + + It will call readline a maximum of twice, and return the encoding used + (as a string) and a list of any lines (left as bytes) it has read in. + + It detects the encoding from the presence of a utf-8 bom or an encoding + cookie as specified in pep-0263. If both a bom and a cookie are + present, but disagree, a SyntaxError will be raised. If the encoding + cookie is an invalid charset, raise a SyntaxError. Note that if a + utf-8 bom is found, 'utf-8-sig' is returned. + + If no encoding is specified, then the default of 'utf-8' will be + returned. + """ + bom_found = False + encoding = None + default = 'utf-8' + + def read_or_stop(): + try: + return readline() + except StopIteration: + return b'' - -from xonsh.lazyasd import LazyObject -from xonsh.tokenize import detect_encoding, tokopen + def find_cookie(line): + try: + line_string = line.decode('ascii') + except UnicodeDecodeError: + return None + matches = cookie_re.findall(line_string) + if not matches: + return None + encoding = _get_normal_name(matches[0]) + try: + codec = lookup(encoding) + except LookupError: + # This behaviour mimics the Python interpreter + raise SyntaxError("unknown encoding: " + encoding) -cookie_comment_re = LazyObject( - lambda: re.compile(r"^\s*#.*coding[:=]\s*([-\w.]+)", re.UNICODE), - globals(), - "cookie_comment_re", -) + if bom_found: + if codec.name != 'utf-8': + # This behaviour mimics the Python interpreter + raise SyntaxError('encoding problem: utf-8') + encoding += '-sig' + return encoding + first = read_or_stop() + if first.startswith(BOM_UTF8): + bom_found = True + first = first[3:] + default = 'utf-8-sig' + if not first: + return default, [] -def source_to_unicode(txt, errors="replace", skip_encoding_cookie=True): + encoding = find_cookie(first) + if encoding: + return encoding, [first] + + second = read_or_stop() + if not second: + return default, [first] + + encoding = find_cookie(second) + if encoding: + return encoding, [first, second] + + return default, [first, second] + + +try: + # Available in Python 3.2 and above. + from tokenize import tokopen +except ImportError: + # Copied from Python 3.2 tokenize + def tokopen(filename): + """Open a file in read only mode using the encoding detected by + detect_encoding(). + """ + buf = io.open(filename, 'rb') # Tweaked to use io.open for Python 2 + encoding, lines = detect_encoding(buf.readline) + buf.seek(0) + text = TextIOWrapper(buf, encoding, line_buffering=True) + text.mode = 'r' + return text + + +def source_to_unicode(txt, errors='replace', skip_encoding_cookie=True): """Converts a bytes string with python source code to unicode. Unicode strings are passed through unchanged. Byte strings are checked @@ -34,10 +139,10 @@ def source_to_unicode(txt, errors="replace", skip_encoding_cookie=True): txt can be either a bytes buffer or a string containing the source code. 
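A minimal sketch of the intended behavior (hypothetical values): bytes carrying a PEP 263 cookie are decoded with that encoding, and the cookie line itself is dropped when skip_encoding_cookie is True.

    src = b"# -*- coding: latin-1 -*-\nname = 'caf\xe9'\n"
    text = source_to_unicode(src)   # -> "name = 'café'\n", decoded as latin-1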
""" - if isinstance(txt, str): + if isinstance(txt, unicode_type): return txt if isinstance(txt, bytes): - buf = io.BytesIO(txt) + buf = BytesIO(txt) else: buf = txt try: @@ -45,8 +150,8 @@ def source_to_unicode(txt, errors="replace", skip_encoding_cookie=True): except SyntaxError: encoding = "ascii" buf.seek(0) - text = io.TextIOWrapper(buf, encoding, errors=errors, line_buffering=True) - text.mode = "r" + text = TextIOWrapper(buf, encoding, errors=errors, line_buffering=True) + text.mode = 'r' if skip_encoding_cookie: return u"".join(strip_encoding_cookie(text)) else: @@ -67,6 +172,7 @@ def strip_encoding_cookie(filelike): yield second except StopIteration: return + for line in it: yield line @@ -94,7 +200,7 @@ def read_py_file(filename, skip_encoding_cookie=True): return f.read() -def read_py_url(url, errors="replace", skip_encoding_cookie=True): +def read_py_url(url, errors='replace', skip_encoding_cookie=True): """Read a Python file from a URL, using the encoding declared inside the file. Parameters @@ -133,3 +239,22 @@ def readline(): return next(x) return readline + +# Code for going between .py files and cached .pyc files ---------------------- + +try: # Python 3.2, see PEP 3147 + from imp import source_from_cache, cache_from_source # pylint:disable=unused-import +except ImportError: + # Python <= 3.1: .pyc files go next to .py + def source_from_cache(path): + basename, ext = os.path.splitext(path) + if ext not in ('.pyc', '.pyo'): + raise ValueError('Not a cached Python file extension', ext) + # Should we look for .pyw files? + return basename + '.py' + + def cache_from_source(path, debug_override=None): + if debug_override is None: + debug_override = __debug__ + basename, _ = os.path.splitext(path) + return basename + '.pyc' if debug_override else '.pyo' diff --git a/xonsh/parser.py b/xonsh/parser.py index f70a05c..f0f1ee6 100644 --- a/xonsh/parser.py +++ b/xonsh/parser.py @@ -1,15 +1,2505 @@ -# -*- coding: utf-8 -*- -"""Implements the xonsh parser.""" -from xonsh.lazyasd import lazyobject -from xonsh.platform import PYTHON_VERSION_INFO - - -@lazyobject -def Parser(): - if PYTHON_VERSION_INFO > (3, 6): - from xonsh.parsers.v36 import Parser as p - elif PYTHON_VERSION_INFO > (3, 5): - from xonsh.parsers.v35 import Parser as p - else: - from xonsh.parsers.v34 import Parser as p - return p +"""Implements the xonsh parser""" +import os +import sys +from collections import Iterable, Sequence, Mapping + +from ply import yacc + +from xonsh import ast +from xonsh.lexer import Lexer +from xonsh.tools import VER_3_4, VER_3_5, VER_MAJOR_MINOR, V_MAJOR_MINOR, \ + docstring_by_version + + +class Location(object): + """Location in a file.""" + + def __init__(self, fname, lineno, column=None): + """Takes a filename, line number, and optionally a column number.""" + self.fname = fname + self.lineno = lineno + self.column = column + + def __str__(self): + s = '{0}:{1}'.format(self.fname, self.lineno) + if self.column is not None: + s += ':{0}'.format(self.column) + return s + + +def has_elts(x): + """Tests if x is an AST node with elements.""" + return isinstance(x, ast.AST) and hasattr(x, 'elts') + + +def ensure_has_elts(x, lineno=1, col_offset=1): + """Ensures that x is an AST node with elements.""" + if not has_elts(x): + if not isinstance(x, Iterable): + x = [x] + x = ast.Tuple(elts=x, + ctx=ast.Load(), + lineno=lineno, + col_offset=col_offset) + return x + + +def empty_list(lineno=None, col=None): + """Creates the AST node for an empty list.""" + return ast.List(elts=[], ctx=ast.Load(), 
lineno=lineno, col_offset=col) + + +def binop(x, op, y, lineno=None, col=None): + """Creates the AST node for a binary operation.""" + return ast.BinOp(left=x, op=op, right=y, lineno=lineno, col_offset=col) + + +def call_split_lines(x, lineno=None, col=None): + """Creates the AST node for calling the 'splitlines' attribute of an + object, nominally a string. + """ + return ast.Call(func=ast.Attribute(value=x, + attr='splitlines', + ctx=ast.Load(), + lineno=lineno, + col_offset=col), + args=[], + keywords=[], + starargs=None, + kwargs=None, + lineno=lineno, + col_offset=col) + + +def ensure_list_from_str_or_list(x, lineno=None, col=None): + """Creates the AST node for the following expression:: + + [x] if isinstance(x, str) else x + + Somewhat useful. + """ + return ast.IfExp(test=ast.Call(func=ast.Name(id='isinstance', + ctx=ast.Load(), + lineno=lineno, + col_offset=col), + args=[x, ast.Name(id='str', + ctx=ast.Load(), + lineno=lineno, + col_offset=col)], + keywords=[], + starargs=None, + kwargs=None, + lineno=lineno, + col_offset=col), + body=ast.List(elts=[x], + ctx=ast.Load(), + lineno=lineno, + col_offset=col), + orelse=x, + lineno=lineno, + col_offset=col) + + +def xonsh_call(name, args, lineno=None, col=None): + """Creates the AST node for calling a function of a given name.""" + return ast.Call(func=ast.Name(id=name, + ctx=ast.Load(), + lineno=lineno, + col_offset=col), + args=args, + keywords=[], + starargs=None, + kwargs=None, + lineno=lineno, + col_offset=col) + + +def xonsh_help(x, lineno=None, col=None): + """Creates the AST node for calling the __xonsh_help__() function.""" + return xonsh_call('__xonsh_help__', [x], lineno=lineno, col=col) + + +def xonsh_superhelp(x, lineno=None, col=None): + """Creates the AST node for calling the __xonsh_superhelp__() function.""" + return xonsh_call('__xonsh_superhelp__', [x], lineno=lineno, col=col) + + +def xonsh_regexpath(x, lineno=None, col=None): + """Creates the AST node for calling the __xonsh_regexpath__() function.""" + return xonsh_call('__xonsh_regexpath__', [x], lineno=lineno, col=col) + + +def load_ctx(x): + """Recursively sets ctx to ast.Load()""" + if not hasattr(x, 'ctx'): + return + x.ctx = ast.Load() + if isinstance(x, (ast.Tuple, ast.List)): + for e in x.elts: + load_ctx(e) + elif isinstance(x, ast.Starred): + load_ctx(x.value) + + +def store_ctx(x): + """Recursively sets ctx to ast.Store()""" + if not hasattr(x, 'ctx'): + return + x.ctx = ast.Store() + if isinstance(x, (ast.Tuple, ast.List)): + for e in x.elts: + store_ctx(e) + elif isinstance(x, ast.Starred): + store_ctx(x.value) + + +def empty_list_if_newline(x): + return [] if x == '\n' else x + + +class Parser(object): + """A class that parses the xonsh language.""" + + def __init__(self, + lexer_optimize=True, + lexer_table='xonsh.lexer_table', + yacc_optimize=True, + yacc_table='xonsh.parser_table', + yacc_debug=False, + outputdir=None): + """Parameters + ---------- + lexer_optimize : bool, optional + Set to false when unstable and true when lexer is stable. + lexer_table : str, optional + Lexer module used when optimized. + yacc_optimize : bool, optional + Set to false when unstable and true when parser is stable. + yacc_table : str, optional + Parser module used when optimized. + yacc_debug : debug, optional + Dumps extra debug info. + outputdir : str or None, optional + The directory to place generated tables within. 
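Hypothetical usage of this class (names as defined here): build the parser once, then feed it xonsh source; the parse() method defined below returns a Python AST.

    parser = Parser(yacc_debug=False)
    tree = parser.parse("x = 42\n", filename="<example>", mode="exec")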
+ """ + self.lexer = lexer = Lexer() + self.tokens = lexer.tokens + + opt_rules = [ + 'newlines', 'arglist', 'func_call', 'rarrow_test', 'typedargslist', + 'equals_test', 'colon_test', 'tfpdef', 'comma_tfpdef_list', + 'comma_pow_tfpdef', 'vfpdef', 'comma_vfpdef_list', + 'comma_pow_vfpdef', 'equals_yield_expr_or_testlist_list', + 'testlist', 'as_name', 'period_or_ellipsis_list', + 'comma_import_as_name_list', 'comma_dotted_as_name_list', + 'comma_name_list', 'comma_test', 'elif_part_list', 'finally_part', + 'varargslist', 'or_and_test_list', 'and_not_test_list', + 'comp_op_expr_list', 'xor_and_expr_list', + 'ampersand_shift_expr_list', 'shift_arith_expr_list', + 'op_factor_list', 'trailer_list', 'testlist_comp', + 'yield_expr_or_testlist_comp', 'dictorsetmaker', + 'comma_subscript_list', 'test', 'sliceop', 'comp_iter', + 'yield_arg', 'test_comma_list',] + if VER_MAJOR_MINOR <= VER_3_4: + opt_rules += ['argument_comma_list', 'comma_argument_list',] + for rule in opt_rules: + self._opt_rule(rule) + + list_rules = [ + 'comma_tfpdef', 'comma_vfpdef', 'semi_small_stmt', + 'comma_test_or_star_expr', 'period_or_ellipsis', + 'comma_import_as_name', 'comma_dotted_as_name', 'period_name', + 'comma_name', 'elif_part', 'except_part', 'comma_with_item', + 'or_and_test', 'and_not_test', 'comp_op_expr', 'pipe_xor_expr', + 'xor_and_expr', 'ampersand_shift_expr', 'shift_arith_expr', + 'pm_term', 'op_factor', 'trailer', 'comma_subscript', + 'comma_expr_or_star_expr', 'comma_test', 'comma_argument', 'comma_item', + 'attr_period_name', 'test_comma', 'equals_yield_expr_or_testlist', + 'test_or_star_expr'] + if VER_MAJOR_MINOR <= VER_3_4: + list_rules += ['argument_comma',] + for rule in list_rules: + self._list_rule(rule) + + yacc_kwargs = dict(module=self, + debug=yacc_debug, + start='start_symbols', + optimize=yacc_optimize, + tabmodule=yacc_table) + if not yacc_debug: + yacc_kwargs['errorlog'] = yacc.NullLogger() + if outputdir is not None: + yacc_kwargs['outputdir'] = outputdir + self.parser = yacc.yacc(**yacc_kwargs) + + # Keeps track of the last token given to yacc (the lookahead token) + self._last_yielded_token = None + + def reset(self): + """Resets for clean parsing.""" + self.lexer.reset() + self._last_yielded_token = None + + def parse(self, s, filename='', mode='exec', debug_level=0): + """Returns an abstract syntax tree of xonsh code. + + Parameters + ---------- + s : str + The xonsh code. + filename : str, optional + Name of the file. + mode : str, optional + Execution mode, one of: exec, eval, or single. + debug_level : str, optional + Debugging level passed down to yacc. + + Returns + ------- + tree : AST + """ + self.reset() + self.lexer.fname = filename + tree = self.parser.parse(input=s, lexer=self.lexer, debug=debug_level) + # hack for getting modes right + if mode == 'single': + if isinstance(tree, ast.Expression): + tree = ast.Interactive(body=[self.expr(tree.body)]) + elif isinstance(tree, ast.Module): + tree = ast.Interactive(body=tree.body) + return tree + + def _lexer_errfunc(self, msg, line, column): + self._parse_error(msg, self.currloc(line, column)) + + def _yacc_lookahead_token(self): + """Gets the last token seen by the lexer.""" + return self.lexer.last + + def _opt_rule(self, rulename): + """For a rule name, creates an associated optional rule. + '_opt' is appended to the rule name. 
+ """ + + def optfunc(self, p): + p[0] = p[1] + + optfunc.__doc__ = ('{0}_opt : empty\n' + ' | {0}').format(rulename) + optfunc.__name__ = 'p_' + rulename + '_opt' + setattr(self.__class__, optfunc.__name__, optfunc) + + def _list_rule(self, rulename): + """For a rule name, creates an associated list rule. + '_list' is appended to the rule name. + """ + + def listfunc(self, p): + p[0] = p[1] if len(p) == 2 else p[1] + p[2] + + listfunc.__doc__ = ('{0}_list : {0}\n' + ' | {0}_list {0}').format(rulename) + listfunc.__name__ = 'p_' + rulename + '_list' + setattr(self.__class__, listfunc.__name__, listfunc) + + def currloc(self, lineno, column=None): + """Returns the current location.""" + return Location(fname=self.lexer.fname, lineno=lineno, column=column) + + def expr(self, p): + """Creates an expression for a token.""" + return ast.Expr(value=p, lineno=p.lineno, col_offset=p.col_offset) + + def token_col(self, t): + """Gets ths token column""" + return t.lexpos + + @property + def lineno(self): + if self.lexer.last is None: + return 0 + else: + return self.lexer.last.lineno + + @property + def col(self): + t = self._yacc_lookahead_token() + if t is not None: + return self.token_col(t) + return 1 + + def _parse_error(self, msg, loc): + err = SyntaxError('{0}: {1}'.format(loc, msg)) + err.loc = loc + raise err + + # + # Precedence of operators + # + precedence = (('left', 'PIPE'), ('left', 'XOR'), ('left', 'AMPERSAND'), + ('left', 'EQ', 'NE'), ('left', 'GT', 'GE', 'LT', 'LE'), + ('left', 'RSHIFT', 'LSHIFT'), ('left', 'PLUS', 'MINUS'), + ('left', 'TIMES', 'DIVIDE', 'DOUBLEDIV', 'MOD'), + ('left', 'POW'), ) + + # + # Grammar as defined by BNF + # + + def p_start_symbols(self, p): + """start_symbols : single_input + | file_input + | eval_input + | empty + """ + p[0] = p[1] + + def p_single_input(self, p): + """single_input : compound_stmt NEWLINE + """ + p1 = empty_list_if_newline(p[1]) + p0 = ast.Interactive(body=p1) + p[0] = p0 + + def p_file_input(self, p): + """file_input : file_stmts""" + p[0] = ast.Module(body=p[1]) + + def p_file_stmts(self, p): + """file_stmts : newline_or_stmt + | file_stmts newline_or_stmt + """ + if len(p) == 2: + # newline_or_stmt ENDMARKER + p1 = empty_list_if_newline(p[1]) + p[0] = p1 + else: + # file_input newline_or_stmt ENDMARKER + p2 = empty_list_if_newline(p[2]) + p[0] = p[1] + p2 + + def p_newline_or_stmt(self, p): + """newline_or_stmt : NEWLINE + | stmt + """ + p[0] = p[1] + + def p_newlines(self, p): + """newlines : NEWLINE + | newlines NEWLINE + """ + p[0] = p[1] if len(p) == 2 else p[1] + p[2] + + def p_eval_input(self, p): + """eval_input : testlist newlines_opt + """ + p[0] = ast.Expression(body=p[1]) + + def p_func_call(self, p): + """func_call : LPAREN arglist_opt RPAREN""" + p[0] = p[2] + + def p_attr_period_name(self, p): + """attr_period_name : PERIOD NAME""" + p[0] = [p[2]] + + def p_attr_name(self, p): + """attr_name : NAME + | NAME attr_period_name_list + """ + p1 = p[1] + name = ast.Name(id=p1, + ctx=ast.Load(), + lineno=self.lineno, + col_offset=self.col) + if len(p) == 2: + p0 = name + else: + p2 = p[2] + p0 = ast.Attribute(value=name, + attr=p2[0], + ctx=ast.Load(), + lineno=self.lineno, + col_offset=self.col) + for a in p2[1:]: + p0 = ast.Attribute(value=p0, + attr=a, + ctx=ast.Load(), + lineno=self.lineno, + col_offset=self.col) + p[0] = p0 + + def p_decorator(self, p): + """decorator : AT attr_name NEWLINE + | AT attr_name func_call NEWLINE + """ + lenp = len(p) + name = p[2] + p3 = p[3] if lenp > 3 else None + if lenp == 4: + p0 = name 
+ elif p3 is None: + p0 = ast.Call(func=name, + args=[], + keywords=[], + starargs=None, + kwargs=None, + lineno=self.lineno, + col_offset=self.col) + else: + p0 = ast.Call(func=name, + lineno=self.lineno, + col_offset=self.col, **p3) + p[0] = p0 + + def p_decorators(self, p): + """decorators : decorator + | decorators decorator + """ + p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]] + + @docstring_by_version( + v34=\ + """classdef_or_funcdef : classdef + | funcdef + """, + v35=\ + """classdef_or_funcdef : classdef + | funcdef + | async_funcdef + """, + ) + def p_classdef_or_funcdef(self, p): + p[0] = p[1] + + def p_decorated(self, p): + """decorated : decorators classdef_or_funcdef""" + p1, p2 = p[1], p[2] + p2[0].decorator_list = p1 + p[0] = p2 + + def p_rarrow_test(self, p): + """rarrow_test : RARROW test""" + p[0] = p[2] + + def p_funcdef(self, p): + """funcdef : DEF NAME parameters rarrow_test_opt COLON suite""" + f = ast.FunctionDef(name=p[2], + args=p[3], + returns=p[4], + body=p[6], + decorator_list=[], + lineno=self.lineno, + col_offset=self.col) + p[0] = [f] + + def p_async_funcdef(self, p): + """async_funcdef : ASYNC funcdef""" + f = p[2][0] + p[0] = [ast.AsyncFunctionDef(**f.__dict__)] + + def p_parameters(self, p): + """parameters : LPAREN typedargslist_opt RPAREN""" + p2 = p[2] + if p2 is None: + p2 = ast.arguments(args=[], + vararg=None, + kwonlyargs=[], + kw_defaults=[], + kwarg=None, + defaults=[]) + p[0] = p2 + + def p_equals_test(self, p): + """equals_test : EQUALS test""" + p[0] = p[2] + + def p_typedargslist(self, p): + """typedargslist : tfpdef equals_test_opt comma_tfpdef_list_opt comma_opt + | tfpdef equals_test_opt comma_tfpdef_list_opt comma_opt TIMES tfpdef_opt COMMA POW vfpdef + | tfpdef equals_test_opt comma_tfpdef_list_opt comma_opt TIMES tfpdef_opt comma_tfpdef_list_opt + | tfpdef equals_test_opt comma_tfpdef_list_opt comma_opt TIMES tfpdef_opt comma_tfpdef_list COMMA POW tfpdef + | tfpdef equals_test_opt comma_tfpdef_list_opt comma_opt POW tfpdef + | TIMES tfpdef_opt comma_tfpdef_list comma_pow_tfpdef_opt + | TIMES tfpdef_opt comma_pow_tfpdef_opt + | POW tfpdef + """ + lenp = len(p) + p1, p2 = p[1], p[2] + p3 = p[3] if lenp > 3 else None + p4 = p[4] if lenp > 4 else None + # skip p5 + p6 = p[6] if lenp > 6 else None + p7 = p[7] if lenp > 7 else None + # skip p8 + p9 = p[9] if lenp > 9 else None + p10 = p[10] if lenp > 10 else None + p0 = ast.arguments(args=[], + vararg=None, + kwonlyargs=[], + kw_defaults=[], + kwarg=None, + defaults=[]) + if lenp == 3: + p0.kwarg = p2 + elif lenp == 4: + self._set_var_args(p0, p2, None) + p0.kwarg = p3 + elif lenp == 5 and p1 != '*': + # x + self._set_regular_args(p0, p1, p2, p3, p4) + elif lenp == 5 and p1 == '*': + self._set_var_args(p0, p2, p3) # *args + if p4 is not None: + # *args, x, **kwargs + p0.kwarg = p4 + elif lenp == 7: + # x, **kwargs + self._set_regular_args(p0, p1, p2, p3, p4) + p0.kwarg = p[6] + elif lenp == 8: + # x, *args + self._set_regular_args(p0, p1, p2, p3, p4) + self._set_var_args(p0, p6, p7) + elif lenp == 10: + # x, *args, **kwargs + self._set_regular_args(p0, p1, p2, p3, p4) + self._set_var_args(p0, p6, None) + p0.kwarg = p9 + elif lenp == 11: + # x, *args, **kwargs + self._set_regular_args(p0, p1, p2, p3, p4) + self._set_var_args(p0, p6, p7) + p0.kwarg = p10 + else: + assert False + p[0] = p0 + + def p_colon_test(self, p): + """colon_test : COLON test""" + p[0] = p[2] + + def p_tfpdef(self, p): + """tfpdef : NAME colon_test_opt""" + p[0] = ast.arg(arg=p[1], annotation=p[2]) + + def 
p_comma_tfpdef(self, p): + """comma_tfpdef : COMMA + | COMMA tfpdef equals_test_opt + """ + if len(p) == 2: + p[0] = [] + else: + p[0] = [{'arg': p[2], 'default': p[3]}] + + def p_comma_pow_tfpdef(self, p): + """comma_pow_tfpdef : COMMA POW tfpdef""" + p[0] = p[3] + + def _set_args_def(self, argmts, vals, kwargs=False): + args, defs = (argmts.kwonlyargs, argmts.kw_defaults) if kwargs else \ + (argmts.args, argmts.defaults) + for v in vals: + args.append(v['arg']) + d = v['default'] + if kwargs or (d is not None): + defs.append(d) + + def _set_regular_args(self, p0, p1, p2, p3, p4): + if p2 is None and p3 is None: + # x + p0.args.append(p1) + elif p2 is not None and p3 is None: + # x=42 + p0.args.append(p1) + p0.defaults.append(p2) + elif p2 is None and p3 is not None: + # x, y and x, y=42 + p0.args.append(p1) + self._set_args_def(p0, p3) + else: + # x=42, y=42 + p0.args.append(p1) + p0.defaults.append(p2) + self._set_args_def(p0, p3) + + def _set_var_args(self, p0, vararg, kwargs): + if vararg is None: + self._set_args_def(p0, kwargs, kwargs=True) + elif vararg is not None and kwargs is None: + # *args + p0.vararg = vararg + elif vararg is not None and kwargs is not None: + # *args, x and *args, x, y and *args, x=10 and *args, x=10, y + # and *args, x, y=10, and *args, x=42, y=65 + p0.vararg = vararg + self._set_args_def(p0, kwargs, kwargs=True) + else: + assert False + + def p_varargslist(self, p): + """varargslist : vfpdef equals_test_opt comma_vfpdef_list_opt comma_opt + | vfpdef equals_test_opt comma_vfpdef_list_opt comma_opt TIMES vfpdef_opt COMMA POW vfpdef + | vfpdef equals_test_opt comma_vfpdef_list_opt comma_opt TIMES vfpdef_opt comma_vfpdef_list_opt + | vfpdef equals_test_opt comma_vfpdef_list_opt comma_opt TIMES vfpdef_opt comma_vfpdef_list COMMA POW vfpdef + | vfpdef equals_test_opt comma_vfpdef_list_opt comma_opt POW vfpdef + | TIMES vfpdef_opt comma_vfpdef_list comma_pow_vfpdef_opt + | TIMES vfpdef_opt comma_pow_vfpdef_opt + | POW vfpdef + """ + lenp = len(p) + p1, p2 = p[1], p[2] + p3 = p[3] if lenp > 3 else None + p4 = p[4] if lenp > 4 else None + # skip p5 + p6 = p[6] if lenp > 6 else None + p7 = p[7] if lenp > 7 else None + # skip p8 + p9 = p[9] if lenp > 9 else None + p10 = p[10] if lenp > 10 else None + p0 = ast.arguments(args=[], + vararg=None, + kwonlyargs=[], + kw_defaults=[], + kwarg=None, + defaults=[]) + if lenp == 3: + p0.kwarg = p2 + elif lenp == 4: + self._set_var_args(p0, p2, None) + p0.kwarg = p3 + elif lenp == 5 and p1 != '*': + # x + self._set_regular_args(p0, p1, p2, p3, p4) + elif lenp == 5 and p1 == '*': + self._set_var_args(p0, p2, p3) # *args + if p4 is not None: + # *args, x, **kwargs + p0.kwarg = p4 + elif lenp == 7: + # x, **kwargs + self._set_regular_args(p0, p1, p2, p3, p4) + p0.kwarg = p6 + elif lenp == 8: + # x, *args + self._set_regular_args(p0, p1, p2, p3, p4) + self._set_var_args(p0, p6, p7) + elif lenp == 10: + # x, *args, **kwargs + self._set_regular_args(p0, p1, p2, p3, p4) + self._set_var_args(p0, p6, None) + p0.kwarg = p9 + elif lenp == 11: + # x, *args, **kwargs + self._set_regular_args(p0, p1, p2, p3, p4) + self._set_var_args(p0, p6, p7) + p0.kwarg = p10 + else: + assert False + p[0] = p0 + + def p_vfpdef(self, p): + """vfpdef : NAME""" + p[0] = ast.arg(arg=p[1], annotation=None) + + def p_comma_vfpdef(self, p): + """comma_vfpdef : COMMA + | COMMA vfpdef equals_test_opt + """ + if len(p) == 2: + p[0] = [] + else: + p[0] = [{'arg': p[2], 'default': p[3]}] + + def p_comma_pow_vfpdef(self, p): + """comma_pow_vfpdef : COMMA POW vfpdef""" + 
p[0] = p[3] + + def p_stmt(self, p): + """stmt : simple_stmt + | compound_stmt + """ + p[0] = p[1] + + def p_stmt_list(self, p): + """stmt_list : stmt + | stmt_list stmt""" + if len(p) == 2: + p[0] = p[1] + else: + p[0] = p[1] + p[2] + + def p_semi_opt(self, p): + """semi_opt : SEMI + | empty + """ + if len(p) == 2: + p[0] = p[1] + + def p_semi_small_stmt(self, p): + """semi_small_stmt : SEMI small_stmt""" + p[0] = [p[2]] + + def p_simple_stmt(self, p): + """simple_stmt : small_stmt semi_small_stmt_list semi_opt NEWLINE + | small_stmt semi_opt NEWLINE + """ + p1, p2 = p[1], p[2] + p0 = [p1] + if p2 is not None and p2 != ';': + p0 += p2 + p[0] = p0 + + def p_small_stmt(self, p): + """small_stmt : expr_stmt + | del_stmt + | pass_stmt + | flow_stmt + | import_stmt + | global_stmt + | nonlocal_stmt + | assert_stmt + """ + p[0] = p[1] + + _augassign_op = { + '+=': ast.Add, + '-=': ast.Sub, + '*=': ast.Mult, + '@=': ast.MatMult, + '/=': ast.Div, + '%=': ast.Mod, + '//=': ast.FloorDiv, + '**=': ast.Pow, + '^=': ast.BitXor, + '&=': ast.BitAnd, + '|=': ast.BitOr, + '<<=': ast.LShift, + '>>=': ast.RShift + } + + def p_expr_stmt(self, p): + """expr_stmt : testlist_star_expr augassign yield_expr_or_testlist + | testlist_star_expr equals_yield_expr_or_testlist_list_opt + | testlist equals_yield_expr_or_testlist_list_opt + | test_comma_list_opt star_expr comma_test_list equals_yield_expr_or_testlist + | test_comma_list_opt star_expr comma_opt test_comma_list_opt equals_yield_expr_or_testlist + """ + lenp = len(p) + p1, p2 = p[1], p[2] + p1 = [] if p1 is None else p1 + if isinstance(p1, ast.Tuple): + p1 = [p1] + for targ in p1: + store_ctx(targ) + if lenp == 3: + if p2 is None and len(p1) == 1: + load_ctx(p1[0]) + p0 = self.expr(p1[0]) + elif p2 is None: + assert False + else: + list(map(store_ctx, p2[:-1])) + p0 = ast.Assign(targets=p1 + p2[:-1], + value=p2[-1], + lineno=self.lineno, + col_offset=self.col) + elif lenp == 4: + op = self._augassign_op[p2] + if op is None: + self._parse_error('operation {0!r} not supported'.format(p2), + self.currloc(lineno=p.lineno, column=p.lexpos)) + p0 = ast.AugAssign(target=p1[0], op=op(), value=p[3], + lineno=self.lineno, col_offset=self.col) + elif lenp == 5 or lenp == 6: + if lenp == 5: + targs, rhs = p[3], p[4][0] + else: + targs, rhs = (p[4] or []), p[5][0] + store_ctx(p2) + for targ in targs: + store_ctx(targ) + p1.append(p2) + p1.extend(targs) + p1 = [ast.Tuple(elts=p1, + ctx=ast.Store(), + lineno=self.lineno, + col_offset=self.col)] + p0 = ast.Assign(targets=p1, + value=rhs, + lineno=self.lineno, + col_offset=self.col) + else: + assert False + p[0] = p0 + + def p_test_comma(self, p): + """test_comma : test COMMA""" + p[0] = [p[1]] + + def p_comma_opt(self, p): + """comma_opt : COMMA + | empty + """ + if len(p) == 2: + p[0] = p[1] + + def p_test_or_star_expr(self, p): + """test_or_star_expr : test + | star_expr + """ + p[0] = p[1] + + def p_comma_test_or_star_expr(self, p): + """comma_test_or_star_expr : COMMA test_or_star_expr""" + p[0] = [p[2]] + + def p_testlist_star_expr(self, p): + """testlist_star_expr : test_or_star_expr comma_test_or_star_expr_list comma_opt + | test_or_star_expr comma_opt + """ + p1, p2 = p[1], p[2] + if p2 is None: + p0 = [p1] + elif p2 == ',': + p0 = [ast.Tuple(elts=[p1], + ctx=ast.Load(), + lineno=self.lineno, + col_offset=self.col)] + else: + p0 = [ast.Tuple(elts=[p1] + p2, + ctx=ast.Load(), + lineno=self.lineno, + col_offset=self.col)] + p[0] = p0 + + def p_augassign(self, p): + """augassign : PLUSEQUAL + | MINUSEQUAL + | 
TIMESEQUAL + | ATEQUAL + | DIVEQUAL + | MODEQUAL + | AMPERSANDEQUAL + | PIPEEQUAL + | XOREQUAL + | LSHIFTEQUAL + | RSHIFTEQUAL + | POWEQUAL + | DOUBLEDIVEQUAL + """ + p[0] = p[1] + + def p_yield_expr_or_testlist(self, p): + """yield_expr_or_testlist : yield_expr + | testlist + """ + p[0] = p[1] + + def p_equals_yield_expr_or_testlist(self, p): + """equals_yield_expr_or_testlist : EQUALS yield_expr_or_testlist""" + p[0] = [p[2]] + + # + # For normal assignments, additional restrictions enforced + # by the interpreter + # + def p_del_stmt(self, p): + """del_stmt : DEL exprlist""" + p2 = p[2] + for targ in p2: + targ.ctx = ast.Del() + p0 = ast.Delete(targets=p2, lineno=self.lineno, col_offset=self.col) + p[0] = p0 + + def p_pass_stmt(self, p): + """pass_stmt : PASS""" + p[0] = ast.Pass(lineno=self.lineno, col_offset=self.col) + + def p_flow_stmt(self, p): + """flow_stmt : break_stmt + | continue_stmt + | return_stmt + | raise_stmt + | yield_stmt + """ + p[0] = p[1] + + def p_break_stmt(self, p): + """break_stmt : BREAK""" + p[0] = ast.Break(lineno=self.lineno, col_offset=self.col) + + def p_continue_stmt(self, p): + """continue_stmt : CONTINUE""" + p[0] = ast.Continue(lineno=self.lineno, col_offset=self.col) + + def p_return_stmt(self, p): + """return_stmt : RETURN testlist_opt""" + p[0] = ast.Return(value=p[2], lineno=self.lineno, col_offset=self.col) + + def p_yield_stmt(self, p): + """yield_stmt : yield_expr""" + p[0] = self.expr(p[1]) + + def p_raise_stmt(self, p): + """raise_stmt : RAISE + | RAISE test + | RAISE test FROM test + """ + lenp = len(p) + cause = None + if lenp == 2: + exc = None + elif lenp == 3: + exc = p[2] + elif lenp == 5: + exc = p[2] + cause = p[4] + else: + assert False + p0 = ast.Raise(exc=exc, + cause=cause, + lineno=self.lineno, + col_offset=self.col) + p[0] = p0 + + def p_import_stmt(self, p): + """import_stmt : import_name + | import_from + """ + p[0] = p[1] + + def p_import_name(self, p): + """import_name : IMPORT dotted_as_names + """ + p[0] = ast.Import(names=p[2], lineno=self.lineno, col_offset=self.col) + + def p_import_from_pre(self, p): + """import_from_pre : FROM period_or_ellipsis_list_opt dotted_name + | FROM period_or_ellipsis_list + """ + if len(p) == 3: + p0 = p[2] + elif len(p) == 4: + p2, p3 = p[2], p[3] + p0 = p3 if p2 is None else p2 + p3 + else: + assert False + p[0] = p0 + + def p_import_from_post(self, p): + """import_from_post : TIMES + | LPAREN import_as_names RPAREN + | import_as_names + """ + if len(p) == 2: + if p[1] == '*': + p[1] = [ast.alias(name='*', asname=None)] + p0 = p[1] + elif len(p) == 4: + p0 = p[2] + else: + assert False + p[0] = p0 + + def p_import_from(self, p): + """import_from : import_from_pre IMPORT import_from_post + """ + # note below: the ('.' | '...') is necessary because '...' 
is + # tokenized as ELLIPSIS + p1 = p[1] + mod = p1.lstrip('.') + lvl = len(p1) - len(mod) + mod = mod or None + p[0] = ast.ImportFrom(module=mod, + names=p[3], + level=lvl, + lineno=self.lineno, + col_offset=self.col) + + def p_period_or_ellipsis(self, p): + """period_or_ellipsis : PERIOD + | ELLIPSIS + """ + p[0] = p[1] + + def p_as_name(self, p): + """as_name : AS NAME""" + p[0] = p[2] + + def p_import_as_name(self, p): + """import_as_name : NAME as_name_opt""" + p[0] = ast.alias(name=p[1], asname=p[2]) + + def p_comma_import_as_name(self, p): + """comma_import_as_name : COMMA import_as_name + """ + p[0] = [p[2]] + + def p_dotted_as_name(self, p): + """dotted_as_name : dotted_name as_name_opt""" + p0 = ast.alias(name=p[1], asname=p[2]) + p[0] = p0 + + def p_comma_dotted_as_name(self, p): + """comma_dotted_as_name : COMMA dotted_as_name""" + p[0] = [p[2]] + + def p_import_as_names(self, p): + """import_as_names : import_as_name comma_import_as_name_list_opt comma_opt + """ + p1, p2 = p[1], p[2] + p0 = [p1] + if p2 is not None: + p0.extend(p2) + p[0] = p0 + + def p_dotted_as_names(self, p): + """dotted_as_names : dotted_as_name comma_dotted_as_name_list_opt""" + p1, p2 = p[1], p[2] + p0 = [p1] + if p2 is not None: + p0.extend(p2) + p[0] = p0 + + def p_period_name(self, p): + """period_name : PERIOD NAME""" + p[0] = p[1] + p[2] + + def p_dotted_name(self, p): + """dotted_name : NAME + | NAME period_name_list + """ + p[0] = p[1] if len(p) == 2 else p[1] + p[2] + + def p_comma_name(self, p): + """comma_name : COMMA NAME""" + p[0] = [p[2]] + + def p_global_stmt(self, p): + """global_stmt : GLOBAL NAME comma_name_list_opt""" + p2, p3 = p[2], p[3] + names = [p2] + if p3 is not None: + names += p3 + p[0] = ast.Global(names=names, lineno=self.lineno, col_offset=self.col) + + def p_nonlocal_stmt(self, p): + """nonlocal_stmt : NONLOCAL NAME comma_name_list_opt""" + p2, p3 = p[2], p[3] + names = [p2] + if p3 is not None: + names += p3 + p[0] = ast.Nonlocal(names=names, + lineno=self.lineno, + col_offset=self.col) + + def p_comma_test(self, p): + """comma_test : COMMA test""" + p[0] = [p[2]] + + def p_assert_stmt(self, p): + """assert_stmt : ASSERT test comma_test_opt""" + p2, p3 = p[2], p[3] + if p3 is not None: + if len(p3) != 1: + assert False + p3 = p3[0] + p0 = ast.Assert(test=p2, + msg=p3, + lineno=self.lineno, + col_offset=self.col) + p[0] = p0 + + def p_compound_stmt(self, p): + """compound_stmt : if_stmt + | while_stmt + | for_stmt + | try_stmt + | with_stmt + | funcdef + | classdef + | decorated + | async_stmt + """ + p[0] = p[1] + + def p_elif_part(self, p): + """elif_part : ELIF test COLON suite""" + p[0] = [ast.If(test=p[2], + body=p[4], + orelse=[], + lineno=self.lineno, + col_offset=self.col)] + + def p_else_part(self, p): + """else_part : ELSE COLON suite""" + p[0] = p[3] + + def p_if_stmt(self, p): + """if_stmt : IF test COLON suite elif_part_list_opt + | IF test COLON suite elif_part_list_opt else_part + """ + lastif = ast.If(test=p[2], + body=p[4], + orelse=[], + lineno=self.lineno, + col_offset=self.col) + p0 = [lastif] + p5 = p[5] + p6 = p[6] if len(p) > 6 else [] + if p5 is not None: + for elseif in p5: + lastif.orelse.append(elseif) + lastif = elseif + lastif.orelse = p6 + p[0] = p0 + + def p_while_stmt(self, p): + """while_stmt : WHILE test COLON suite + | WHILE test COLON suite else_part + """ + p5 = p[5] if len(p) > 5 else [] + p[0] = [ast.While(test=p[2], + body=p[4], + orelse=p5, + lineno=self.lineno, + col_offset=self.col)] + + def p_for_stmt(self, p): + """for_stmt : FOR 
exprlist IN testlist COLON suite + | FOR exprlist IN testlist COLON suite else_part + """ + p2 = p[2] + p7 = p[7] if len(p) > 7 else [] + if len(p2) == 1: + p2 = p2[0] + store_ctx(p2) + else: + for x in p2: + store_ctx(x) + p2 = ast.Tuple(elts=p2, + ctx=ast.Store(), + lineno=self.lineno, + col_offset=self.col) + p[0] = [ast.For(target=p2, + iter=p[4], + body=p[6], + orelse=p7, + lineno=self.lineno, + col_offset=self.col)] + + def p_async_for_stmt(self, p): + """async_for_stmt : ASYNC for_stmt""" + f = p[2][0] + p[0] = [ast.AsyncFor(**f.__dict__)] + + def p_except_part(self, p): + """except_part : except_clause COLON suite""" + p0 = p[1] + p0.body = p[3] + p[0] = [p0] + + def p_finally_part(self, p): + """finally_part : FINALLY COLON suite""" + p[0] = p[3] + + def p_try_stmt(self, p): + """try_stmt : TRY COLON suite except_part_list else_part finally_part_opt + | TRY COLON suite except_part_list finally_part_opt + | TRY COLON suite finally_part + """ + lenp = len(p) + t = ast.Try(body=p[3], lineno=self.lineno, col_offset=self.col) + if lenp == 7: + p5, p6 = p[5], p[6] + t.handlers = p[4] + t.orelse = [] if p5 is None else p5 + t.finalbody = [] if p6 is None else p6 + elif lenp == 6: + p5 = p[5] + t.handlers = p[4] + t.orelse = [] + t.finalbody = [] if p5 is None else p5 + else: + t.handlers = [] + t.orelse = [] + t.finalbody = p[4] + p[0] = [t] + + def p_with_stmt(self, p): + """with_stmt : WITH with_item COLON suite + | WITH with_item comma_with_item_list COLON suite + """ + p2, p3 = [p[2]], p[3] + if len(p) == 5: + body = p[4] + else: + p2 += p3 + body = p[5] + p[0] = [ast.With(items=p2, + body=body, + lineno=self.lineno, + col_offset=self.col)] + + def p_async_with_stmt(self, p): + """async_with_stmt : ASYNC with_stmt""" + w = p[2][0] + p[0] = [ast.AsyncWith(**w.__dict__)] + + def p_as_expr(self, p): + """as_expr : AS expr""" + p2 = p[2] + store_ctx(p2) + p[0] = p2 + + def p_with_item(self, p): + """with_item : test + | test as_expr + """ + p2 = p[2] if len(p) > 2 else None + p[0] = ast.withitem(context_expr=p[1], optional_vars=p2) + + def p_comma_with_item(self, p): + """comma_with_item : COMMA with_item""" + p[0] = [p[2]] + + def p_except_clause(self, p): + """except_clause : EXCEPT + | EXCEPT test as_name_opt + """ + if len(p) == 2: + p0 = ast.ExceptHandler(type=None, + name=None, + lineno=self.lineno, + col_offset=self.col) + else: + p0 = ast.ExceptHandler(type=p[2], + name=p[3], + lineno=self.lineno, + col_offset=self.col) + p[0] = p0 + + def p_async_stmt(self, p): + """async_stmt : async_funcdef + | async_with_stmt + | async_for_stmt + """ + p[0] = p[1] + + def p_suite(self, p): + """suite : simple_stmt + | NEWLINE INDENT stmt_list DEDENT + """ + p[0] = p[1] if len(p) == 2 else p[3] + + def p_test(self, p): + """test : or_test + | or_test IF or_test ELSE test + | lambdef + """ + if len(p) == 2: + p0 = p[1] + else: + p0 = ast.IfExp(test=p[3], + body=p[1], + orelse=p[5], + lineno=self.lineno, + col_offset=self.col) + p[0] = p0 + + def p_test_nocond(self, p): + """test_nocond : or_test + | lambdef_nocond + """ + p[0] = p[1] + + def p_lambdef(self, p): + """lambdef : LAMBDA varargslist_opt COLON test""" + p2, p4 = p[2], p[4] + if p2 is None: + args = ast.arguments(args=[], + vararg=None, + kwonlyargs=[], + kw_defaults=[], + kwarg=None, + defaults=[]) + else: + args = p2 + p0 = ast.Lambda(args=args, + body=p4, + lineno=self.lineno, + col_offset=self.col) + p[0] = p0 + + def p_lambdef_nocond(self, p): + """lambdef_nocond : LAMBDA varargslist_opt COLON test_nocond""" + assert False + + 
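The boolean-operator rules that follow flatten an or/and chain into a single BoolOp node, matching the shape CPython's own parser produces; a quick standard-library check of the target form:

    import ast
    # 'a or b or c' collapses to BoolOp(op=Or(), values=[Name, Name, Name])
    print(ast.dump(ast.parse("a or b or c", mode="eval")))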
def p_or_test(self, p): + """or_test : and_test or_and_test_list_opt""" + p2 = p[2] + if p2 is None: + p0 = p[1] + elif len(p2) == 2: + p0 = ast.BoolOp(op=p2[0], + values=[p[1], p2[1]], + lineno=self.lineno, + col_offset=self.col) + else: + p0 = ast.BoolOp(op=p2[0], + values=[p[1]] + p2[1::2], + lineno=self.lineno, + col_offset=self.col) + p[0] = p0 + + def p_or_and_test(self, p): + """or_and_test : OR and_test""" + p[0] = [ast.Or(), p[2]] + + def p_and_test(self, p): + """and_test : not_test and_not_test_list_opt""" + p2 = p[2] + if p2 is None: + p0 = p[1] + elif len(p2) == 2: + p0 = ast.BoolOp(op=p2[0], + values=[p[1], p2[1]], + lineno=self.lineno, + col_offset=self.col) + else: + p0 = ast.BoolOp(op=p2[0], + values=[p[1]] + p2[1::2], + lineno=self.lineno, + col_offset=self.col) + p[0] = p0 + + def p_and_not_test(self, p): + """and_not_test : AND not_test""" + p[0] = [ast.And(), p[2]] + + def p_not_test(self, p): + """not_test : NOT not_test + | comparison + """ + if len(p) == 2: + p0 = p[1] + else: + p0 = ast.UnaryOp(op=ast.Not(), + operand=p[2], + lineno=self.lineno, + col_offset=self.col) + p[0] = p0 + + def p_comparison(self, p): + """comparison : expr comp_op_expr_list_opt""" + p2 = p[2] + if p2 is None: + p0 = p[1] + else: + p0 = ast.Compare(left=p[1], + ops=p2[::2], + comparators=p2[1::2], + lineno=self.lineno, + col_offset=self.col) + p[0] = p0 + + def p_comp_op_expr(self, p): + """comp_op_expr : comp_op expr""" + p[0] = [p[1], p[2]] + + _comp_ops = { + '<': ast.Lt, + '>': ast.Gt, + '==': ast.Eq, + '>=': ast.GtE, + '<=': ast.LtE, + '!=': ast.NotEq, + 'in': ast.In, + ('not', 'in'): ast.NotIn, + 'is': ast.Is, + ('is', 'not'): ast.IsNot + } + + def p_comp_op(self, p): + """comp_op : LT + | GT + | EQ + | GE + | LE + | NE + | IN + | NOT IN + | IS + | IS NOT + """ + key = p[1] if len(p) == 2 else (p[1], p[2]) + p[0] = self._comp_ops[key]() + + def p_star_expr(self, p): + """star_expr : TIMES expr""" + p[0] = ast.Starred(value=p[2], + ctx=ast.Load(), + lineno=self.lineno, + col_offset=self.col) + + def _binop_combine(self, p1, p2): + """Combines binary operations""" + if p2 is None: + p0 = p1 + elif isinstance(p2, ast.BinOp): + p2.left = p1 + p0 = p2 + elif isinstance(p2, Sequence) and isinstance(p2[0], ast.BinOp): + p0 = p2[0] + p0.left = p1 + for bop in p2[1:]: + bop.left = p0 + p0 = bop + else: + p0 = p1 + p2 + return p0 + + def p_expr(self, p): + """expr : xor_expr + | xor_expr pipe_xor_expr_list + """ + p[0] = self._binop_combine(p[1], p[2] if len(p) > 2 else None) + + def p_pipe_xor_expr(self, p): + """pipe_xor_expr : PIPE xor_expr""" + p[0] = [ast.BinOp(left=None, + op=ast.BitOr(), + right=p[2], + lineno=self.lineno, + col_offset=self.col)] + + def p_xor_expr(self, p): + """xor_expr : and_expr xor_and_expr_list_opt""" + p[0] = self._binop_combine(p[1], p[2]) + + def p_xor_and_expr(self, p): + """xor_and_expr : XOR and_expr""" + p[0] = [ast.BinOp(left=None, + op=ast.BitXor(), + right=p[2], + lineno=self.lineno, + col_offset=self.col)] + + def p_and_expr(self, p): + """and_expr : shift_expr ampersand_shift_expr_list_opt""" + p[0] = self._binop_combine(p[1], p[2]) + + def p_ampersand_shift_expr(self, p): + """ampersand_shift_expr : AMPERSAND shift_expr""" + p[0] = [ast.BinOp(left=None, + op=ast.BitAnd(), + right=p[2], + lineno=self.lineno, + col_offset=self.col)] + + def p_shift_expr(self, p): + """shift_expr : arith_expr shift_arith_expr_list_opt""" + p[0] = self._binop_combine(p[1], p[2]) + + def p_shift_arith_expr(self, p): + """shift_arith_expr : LSHIFT arith_expr + | RSHIFT 
arith_expr + """ + op = ast.LShift() if p[1] == '<<' else ast.RShift() + p[0] = [ast.BinOp(left=None, + op=op, + right=p[2], + lineno=self.lineno, + col_offset=self.col)] + + def p_arith_expr(self, p): + """arith_expr : term + | term pm_term_list + """ + p2 = p[2] if len(p) > 2 else None + if p2 is None: + p0 = p[1] + elif len(p2) == 2: + p0 = ast.BinOp(left=p[1], + op=p2[0], + right=p2[1], + lineno=self.lineno, + col_offset=self.col) + else: + left = p[1] + for op, right in zip(p2[::2], p2[1::2]): + left = ast.BinOp(left=left, + op=op, + right=right, + lineno=self.lineno, + col_offset=self.col) + p0 = left + p[0] = p0 + + _term_binops = { + '+': ast.Add, + '-': ast.Sub, + '*': ast.Mult, + '@': ast.MatMult, + '/': ast.Div, + '%': ast.Mod, + '//': ast.FloorDiv + } + + def p_pm_term(self, p): + """pm_term : PLUS term + | MINUS term + """ + op = self._term_binops[p[1]]() + p[0] = [op, p[2]] + + def p_term(self, p): + """term : factor op_factor_list_opt""" + p2 = p[2] + if p2 is None: + p0 = p[1] + elif len(p2) == 2: + p0 = ast.BinOp(left=p[1], + op=p2[0], + right=p2[1], + lineno=self.lineno, + col_offset=self.col) + else: + left = p[1] + for op, right in zip(p2[::2], p2[1::2]): + left = ast.BinOp(left=left, + op=op, + right=right, + lineno=self.lineno, + col_offset=self.col) + p0 = left + p[0] = p0 + + def p_op_factor(self, p): + """op_factor : TIMES factor + | AT factor + | DIVIDE factor + | MOD factor + | DOUBLEDIV factor + """ + op = self._term_binops[p[1]] + if op is None: + self._parse_error('operation {0!r} not supported'.format(p[1]), + self.currloc(lineno=p.lineno, column=p.lexpos)) + p[0] = [op(), p[2]] + + _factor_ops = {'+': ast.UAdd, '-': ast.USub, '~': ast.Invert} + + def p_factor(self, p): + """factor : PLUS factor + | MINUS factor + | TILDE factor + | power + """ + if len(p) == 2: + p0 = p[1] + else: + op = self._factor_ops[p[1]]() + p0 = ast.UnaryOp(op=op, + operand=p[2], + lineno=self.lineno, + col_offset=self.col) + p[0] = p0 + + def p_power(self, p): + """power : atom_expr + | atom_expr POW factor + """ + lenp = len(p) + p1 = p[1] + if lenp == 2: + p0 = p1 + elif lenp == 4: + # actual power rule + p0 = ast.BinOp(left=p1, + op=ast.Pow(), + right=p[3], + lineno=self.lineno, + col_offset=self.col) + p[0] = p0 + + def p_yield_expr_or_testlist_comp(self, p): + """yield_expr_or_testlist_comp : yield_expr + | testlist_comp + """ + p[0] = p[1] + + def _list_or_elts_if_not_real_tuple(self, x): + if isinstance(x, ast.Tuple) and not (hasattr(x, '_real_tuple') and \ + x._real_tuple): + rtn = x.elts + else: + rtn = [x] + return rtn + + @docstring_by_version( + v34="""atom_expr : atom trailer_list_opt""", + v35=\ + """atom_expr : atom trailer_list_opt + | AWAIT atom trailer_list_opt + """ + ) + def p_atom_expr(self, p): + lenp = len(p) + if lenp == 3: + leader, trailers = p[1], p[2] + elif lenp == 4: + leader, trailers = p[2], p[3] + else: + assert False + p0 = leader + if trailers is None: + trailers = [] + for trailer in trailers: + if isinstance(trailer, (ast.Index, ast.Slice)): + p0 = ast.Subscript(value=leader, + slice=trailer, + ctx=ast.Load(), + lineno=self.lineno, + col_offset=self.col) + elif isinstance(trailer, Mapping): + p0 = ast.Call(func=leader, + lineno=self.lineno, + col_offset=self.col, **trailer) + elif isinstance(trailer, str): + if trailer == '?': + p0 = xonsh_help(leader, lineno=self.lineno, col=self.col) + elif trailer == '??': + p0 = xonsh_superhelp(leader, + lineno=self.lineno, + col=self.col) + else: + p0 = ast.Attribute(value=leader, + attr=trailer, + 
ctx=ast.Load(), + lineno=self.lineno, + col_offset=self.col) + else: + assert False + leader = p0 + if lenp == 4: + p0 = ast.Await(value=p0, ctx=ast.Load(), lineno=self.lineno, + col_offset=self.col) + p[0] = p0 + + def p_atom(self, p): + """atom : LPAREN yield_expr_or_testlist_comp_opt RPAREN + | LBRACKET testlist_comp_opt RBRACKET + | LBRACE dictorsetmaker_opt RBRACE + | NAME + | number + | string_literal_list + | ELLIPSIS + | NONE + | TRUE + | FALSE + | REGEXPATH + | DOLLAR_NAME + | DOLLAR_LBRACE test RBRACE + | DOLLAR_LPAREN subproc RPAREN + | DOLLAR_LBRACKET subproc RBRACKET + """ + p1 = p[1] + if len(p) == 2: + # plain-old atoms + bt = '`' + if isinstance(p1, (ast.Num, ast.Str, ast.Bytes)): + pass + elif p1 == 'True': + p1 = ast.NameConstant(value=True, + lineno=self.lineno, + col_offset=self.col) + elif p1 == 'False': + p1 = ast.NameConstant(value=False, + lineno=self.lineno, + col_offset=self.col) + elif p1 == 'None': + p1 = ast.NameConstant(value=None, + lineno=self.lineno, + col_offset=self.col) + elif p1 == '...': + p1 = ast.Ellipsis(lineno=self.lineno, col_offset=self.col) + elif p1.startswith(bt) and p1.endswith(bt): + p1 = ast.Str(s=p1.strip(bt), + lineno=self.lineno, + col_offset=self.col) + p1 = xonsh_regexpath(p1, lineno=self.lineno, col=self.col) + elif p1.startswith('$'): + p1 = self._envvar_by_name(p1[1:], + lineno=self.lineno, + col=self.col) + else: + p1 = ast.Name(id=p1, + ctx=ast.Load(), + lineno=self.lineno, + col_offset=self.col) + p[0] = p1 + return + p2 = p[2] + if p2 is None: + # empty container atoms + if p1 == '(': + p0 = ast.Tuple(elts=[], + ctx=ast.Load(), + lineno=self.lineno, + col_offset=self.col) + elif p1 == '[': + p0 = ast.List(elts=[], + ctx=ast.Load(), + lineno=self.lineno, + col_offset=self.col) + elif p1 == '{': + p0 = ast.Dict(keys=[], + values=[], + ctx=ast.Load(), + lineno=self.lineno, + col_offset=self.col) + else: + assert False + elif p1 == '(': + # filled, possible group container tuple atoms + if isinstance(p2, ast.AST): + p0 = p2 + p0._real_tuple = True + elif len(p2) == 1 and isinstance(p2[0], ast.AST): + p0 = p2[0] + else: + assert False + elif p1 == '[': + if isinstance(p2, ast.GeneratorExp): + p0 = ast.ListComp(elt=p2.elt, + generators=p2.generators, + lineno=p2.lineno, + col_offset=p2.col_offset) + else: + if isinstance(p2, ast.Tuple): + if hasattr(p2, '_real_tuple') and p2._real_tuple: + elts = [p2] + else: + elts = p2.elts + else: + elts = [p2] + p0 = ast.List(elts=elts, + ctx=ast.Load(), + lineno=self.lineno, + col_offset=self.col) + elif p1 == '{': + p0 = p2 + elif p1.startswith('$'): + p0 = self._dollar_rules(p) + else: + assert False + p[0] = p0 + + def p_string_literal(self, p): + """string_literal : STRING""" + s = eval(p[1]) + cls = ast.Bytes if p[1].startswith('b') else ast.Str + p[0] = cls(s=s, lineno=self.lineno, col_offset=self.col) + + def p_string_literal_list(self, p): + """string_literal_list : string_literal + | string_literal_list string_literal + """ + if len(p) == 3: + p[1].s += p[2].s + p[0] = p[1] + + def p_number(self, p): + """number : NUMBER""" + p[0] = ast.Num(n=eval(p[1]), lineno=self.lineno, col_offset=self.col) + + def p_testlist_comp(self, p): + """testlist_comp : test_or_star_expr comp_for + | test_or_star_expr comma_opt + | test_or_star_expr comma_test_or_star_expr_list comma_opt + """ + p1, p2 = p[1], p[2] + if len(p) == 3: + if p2 is None: + # split out grouping parentheses. 
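+                # Illustrative of the branches below:
+                #   "(x)"            -> grouping only; the inner node passes through
+                #   "(x,)"           -> the ',' branch builds a one-element ast.Tuple
+                #   "(x for y in z)" -> the 'comps' branch builds an ast.GeneratorExp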
+ p0 = p1 + elif p2 == ',': + p0 = ast.Tuple(elts=[p1], + ctx=ast.Load(), + lineno=self.lineno, + col_offset=self.col) + elif 'comps' in p2: + p0 = ast.GeneratorExp(elt=p1, + generators=p2['comps'], + lineno=self.lineno, + col_offset=self.col) + else: + assert False + elif len(p) == 4: + p0 = ast.Tuple(elts=[p1], + ctx=ast.Load(), + lineno=self.lineno, + col_offset=self.col) + if p2 is not None: + p0.elts.extend(p2) + else: + assert False + else: + assert False + p[0] = p0 + + def p_trailer(self, p): + """trailer : LPAREN arglist_opt RPAREN + | LBRACKET subscriptlist RBRACKET + | PERIOD NAME + | DOUBLE_QUESTION + | QUESTION + """ + p1 = p[1] + p2 = p[2] if len(p) > 2 else None + if p1 == '[': + p0 = [p2] + elif p1 == '(': + p0 = [p2 or dict(args=[], keywords=[], starargs=None, kwargs=None)] + elif p1 == '.': + p0 = [p2] + elif p1 == '?' or p1 == '??': + p0 = [p1] + else: + assert False + p[0] = p0 + + def p_subscriptlist(self, p): + """subscriptlist : subscript comma_subscript_list_opt comma_opt""" + p1, p2 = p[1], p[2] + if p2 is not None: + p1.value = ast.Tuple(elts=[p1.value] + [x.value for x in p2], + ctx=ast.Load(), + lineno=self.lineno, + col_offset=self.col) + p[0] = p1 + + def p_comma_subscript(self, p): + """comma_subscript : COMMA subscript""" + p[0] = [p[2]] + + def p_subscript(self, p): + """subscript : test + | test_opt COLON test_opt sliceop_opt + """ + if len(p) == 2: + p0 = ast.Index(value=p[1]) + else: + p0 = ast.Slice(lower=p[1], upper=p[3], step=p[4]) + p[0] = p0 + + def p_sliceop(self, p): + """sliceop : COLON test_opt""" + p[0] = p[2] + + def p_expr_or_star_expr(self, p): + """expr_or_star_expr : expr + | star_expr + """ + p[0] = p[1] + + def p_comma_expr_or_star_expr(self, p): + """comma_expr_or_star_expr : COMMA expr_or_star_expr""" + p[0] = [p[2]] + + def p_exprlist(self, p): + """exprlist : expr_or_star_expr comma_expr_or_star_expr_list comma_opt + | expr_or_star_expr comma_opt + """ + p1, p2 = p[1], p[2] + p3 = p[3] if len(p) == 4 else None + if p2 is None and p3 is None: + p0 = [p1] + elif p2 == ',' and p3 is None: + p0 = [p1] + elif p2 is not None: + p2.insert(0, p1) + p0 = p2 + else: + assert False + p[0] = p0 + + def p_testlist(self, p): + """testlist : test comma_test_list COMMA + | test comma_test_list + | test COMMA + | test + """ + lenp = len(p) + p1 = p[1] + if lenp > 2: + if isinstance(p1, ast.Tuple) and \ + (hasattr(p1, '_real_tuple') and p1._real_tuple): + p1 = ast.Tuple(elts=[p1], + ctx=ast.Load(), + lineno=self.lineno, + col_offset=self.col) + else: + p1 = ensure_has_elts(p1, + lineno=self.lineno, + col_offset=self.col) + p2 = p[2] if lenp > 2 else [] + p2 = [] if p2 == ',' else p2 + p1.elts += p2 + p[0] = p1 + + @docstring_by_version( + v34="""item : test COLON test""", + v35=\ + """item : test COLON test + | POW expr + """, + ) + def p_item(self, p): + lenp = len(p) + if lenp == 4: + p0 = [p[1], p[3]] + elif lenp == 3: + p0 = [None, p[2]] + else: + assert False + p[0] = p0 + + def p_comma_item(self, p): + """comma_item : COMMA item""" + p[0] = p[2] + + def p_dictorsetmaker(self, p): + """dictorsetmaker : item comp_for + | test_or_star_expr comp_for + | testlist + | test_or_star_expr comma_opt + | test_or_star_expr comma_test_or_star_expr_list comma_opt + | test COLON testlist + | item comma_item_list comma_opt + | test COLON test comma_item_list comma_opt + """ + p1 = p[1] + lenp = len(p) + if lenp == 2: + elts = self._list_or_elts_if_not_real_tuple(p1) + p0 = ast.Set(elts=elts, ctx=ast.Load(), lineno=self.lineno, + col_offset=self.col) + elif lenp 
== 3: + p2 = p[2] + if p2 is None or p2 == ',': + elts = self._list_or_elts_if_not_real_tuple(p1) + p0 = ast.Set(elts=elts, ctx=ast.Load(), lineno=self.lineno, + col_offset=self.col) + else: + comps = p2.get('comps', []) + if isinstance(p1, list) and len(p1) == 2: + p0 = ast.DictComp(key=p1[0], value=p1[1], generators=comps, + lineno=self.lineno, col_offset=self.col) + else: + p0 = ast.SetComp(elt=p1, generators=comps, lineno=self.lineno, + col_offset=self.col) + elif lenp == 4: + p2, p3 = p[2], p[3] + if isinstance(p1, list) and len(p1) == 2: + cls = ast.Dict + keys = [p1[0]] + vals = [p1[1]] + for k, v in zip(p2[::2], p2[1::2]): + keys.append(k) + vals.append(v) + p0 = ast.Dict(keys=keys, values=vals, ctx=ast.Load(), lineno=self.lineno, + col_offset=self.col) + elif p2 == ':': + keys = [p1] + vals = self._list_or_elts_if_not_real_tuple(p3) + p0 = ast.Dict(keys=keys, values=vals, ctx=ast.Load(), lineno=self.lineno, + col_offset=self.col) + elif isinstance(p1, ast.AST): + elts = [p1] + p2 + p0 = ast.Set(elts=elts, ctx=ast.Load(), lineno=self.lineno, + col_offset=self.col) + else: + assert False + elif lenp == 6: + p4 = p[4] + keys = [p1] + vals = [p[3]] + for k, v in zip(p4[::2], p4[1::2]): + keys.append(k) + vals.append(v) + p0 = ast.Dict(keys=keys, values=vals, ctx=ast.Load(), lineno=self.lineno, + col_offset=self.col) + else: + assert False + p[0] = p0 + + def p_classdef(self, p): + """classdef : CLASS NAME func_call_opt COLON suite""" + p3 = p[3] + b, kw = ([], []) if p3 is None else (p3['args'], p3['keywords']) + c = ast.ClassDef(name=p[2], + bases=b, + keywords=kw, + starargs=None, + kwargs=None, + body=p[5], + decorator_list=[], + lineno=self.lineno, + col_offset=self.col) + p[0] = [c] + + def _set_arg(self, args, arg, ensure_kw=False): + if isinstance(arg, ast.keyword): + args['keywords'].append(arg) + elif ensure_kw: + if VER_MAJOR_MINOR <= VER_3_4: + args['kwargs'] = arg + elif VER_MAJOR_MINOR >= VER_3_5: + args['keywords'].append(ast.keyword(arg=None, value=arg)) + else: + args['args'].append(arg) + + # + # arglist rule had significant changes + # + if VER_3_5 <= VER_MAJOR_MINOR: + def p_arglist(self, p): + """arglist : argument comma_opt + | argument comma_argument_list comma_opt + """ + p0 = {'args': [], 'keywords': []} + p1, p2 = p[1], p[2] + p2 = None if p2 == ',' else p2 + self._set_arg(p0, p1) + if p2 is not None: + for arg in p2: + self._set_arg(p0, arg) + p[0] = p0 + else: # Python v3.4 + def p_arglist(self, p): + """arglist : argument comma_opt + | argument_comma_list argument comma_opt + | argument_comma_list_opt TIMES test comma_argument_list_opt + | argument_comma_list_opt TIMES test COMMA POW test + | argument_comma_list_opt TIMES test comma_argument_list COMMA POW test + | argument_comma_list_opt POW test + """ + lenp = len(p) + p1, p2 = p[1], p[2] + p0 = {'args': [], 'keywords': [], 'starargs': None, 'kwargs': None} + if lenp == 3: + self._set_arg(p0, p1) + elif lenp == 4 and p2 != '**': + for arg in p1: + self._set_arg(p0, arg) + self._set_arg(p0, p2) + elif lenp == 4 and p2 == '**': + if p1 is not None: + for arg in p1: + self._set_arg(p0, arg) + self._set_arg(p0, p[3], ensure_kw=True) + elif lenp == 5: + p0['starargs'], p4 = p[3], p[4] + if p1 is not None: + for arg in p1: + self._set_arg(p0, arg) + if p4 is not None: + for arg in p4: + self._set_arg(p0, arg, ensure_kw=True) + elif lenp == 7: + p0['starargs'] = p[3] + if p1 is not None: + for arg in p1: + self._set_arg(p0, arg) + self._set_arg(p0, p[6], ensure_kw=True) + elif lenp == 8: + kwkey = 'keywords' if 
VER_MAJOR_MINOR >= VER_3_5 else 'kwargs' + p0['starargs'], p4 = p[3], p[4] + if p1 is not None: + for arg in p1: + self._set_arg(p0, arg) + for arg in p4: + self._set_arg(p0, arg, ensure_kw=True) + self._set_arg(p0, p[7], ensure_kw=True) + else: + assert False + p[0] = p0 + + if VER_MAJOR_MINOR <= VER_3_4: + def p_argument_comma(self, p): + """argument_comma : argument COMMA""" + p[0] = [p[1]] + + def p_comma_argument(self, p): + """comma_argument : COMMA argument""" + p[0] = [p[2]] + + @docstring_by_version( + v34 = \ + """argument : test + | test comp_for + | test EQUALS test + """, + v35 = \ + """argument : test_or_star_expr + | test comp_for + | test EQUALS test + | POW test + | TIMES test + """, + ) + def p_argument(self, p): + # v3.4 Notes + # Really [keyword '='] test + # The reason that keywords are test nodes instead of NAME is that using + # NAME results in an ambiguity. + # + # v3.5 Notes + # "test '=' test" is really "keyword '=' test", but we have no such token. + # These need to be in a single rule to avoid grammar that is ambiguous + # to our LL(1) parser. Even though 'test' includes '*expr' in star_expr, + # we explicitly match '*' here, too, to give it proper precedence. + # Illegal combinations and orderings are blocked in ast.c: + # multiple (test comp_for) arguments are blocked; keyword unpackings + # that precede iterable unpackings are blocked; etc. + p1 = p[1] + lenp = len(p) + if lenp == 2: + p0 = p1 + elif lenp == 3: + if p1 == '**': + p0 = ast.keyword(arg=None, value=p[2]) + elif p1 == '*': + p0 = ast.Starred(value=p[2]) + else: + p0 = ast.GeneratorExp(elt=p1, generators=p[2]['comps'], + lineno=self.lineno, col_offset=self.col) + elif lenp == 4: + p0 = ast.keyword(arg=p1.id, value=p[3]) + else: + assert False + p[0] = p0 + + def p_comp_iter(self, p): + """comp_iter : comp_for + | comp_if + """ + p[0] = p[1] + + def p_comp_for(self, p): + """comp_for : FOR exprlist IN or_test comp_iter_opt""" + targs, it, p5 = p[2], p[4], p[5] + if len(targs) == 1: + targ = targs[0] + else: + targ = ensure_has_elts(targs, lineno=self.lineno, + col_offset=self.col) + store_ctx(targ) + comp = ast.comprehension(target=targ, iter=it, ifs=[]) + comps = [comp] + p0 = {'comps': comps} + if p5 is not None: + comps += p5.get('comps', []) + comp.ifs += p5.get('if', []) + p[0] = p0 + + def p_comp_if(self, p): + """comp_if : IF test_nocond comp_iter_opt""" + p2, p3 = p[2], p[3] + p0 = {'if': [p2]} + if p3 is not None: + p0['comps'] = p3.get('comps', []) + p[0] = p0 + + def p_yield_expr(self, p): + """yield_expr : YIELD yield_arg_opt""" + p2 = p[2] + if p2 is None: + p0 = ast.Yield(value=p2, lineno=self.lineno, col_offset=self.col) + elif p2['from']: + p0 = ast.YieldFrom(value=p2['val'], + lineno=self.lineno, + col_offset=self.col) + else: + p0 = ast.Yield(value=p2['val'], + lineno=self.lineno, + col_offset=self.col) + p[0] = p0 + + def p_yield_arg(self, p): + """yield_arg : FROM test + | testlist + """ + if len(p) == 2: + p0 = {'from': False, 'val': p[1]} + else: + p0 = {'from': True, 'val': p[2]} + p[0] = p0 + + # + # subprocess + # + + def _dollar_rules(self, p): + """These handle the special xonsh $ shell atoms by looking up + in a special __xonsh_env__ dictionary injected into __builtin__. 
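+        Roughly, mirroring the branches below:
+            $NAME    -> __xonsh_env__['NAME']
+            ${expr}  -> __xonsh_env__[expr]
+            $(cmd)   -> __xonsh_subproc_captured__(...)
+            $[cmd]   -> __xonsh_subproc_uncaptured__(...)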
+ """ + lenp = len(p) + p1, p2 = p[1], p[2] + col = self.col + lineno = self.lineno + if lenp == 3: # $NAME + p0 = self._envvar_by_name(p2, lineno=lineno, col=col) + elif p1 == '${': + xenv = self._xenv(lineno=lineno, col=col) + idx = ast.Index(value=p2) + p0 = ast.Subscript(value=xenv, + slice=idx, + ctx=ast.Load(), + lineno=lineno, + col_offset=col) + elif p1 == '$(': + p0 = xonsh_call('__xonsh_subproc_captured__', p2, + lineno=lineno, + col=col) + elif p1 == '$[': + p0 = xonsh_call('__xonsh_subproc_uncaptured__', p2, + lineno=lineno, + col=col) + else: + assert False + return p0 + + def _xenv(self, lineno=lineno, col=col): + """Creates a new xonsh env reference.""" + return ast.Name(id='__xonsh_env__', + ctx=ast.Load(), + lineno=lineno, + col_offset=col) + + def _envvar_by_name(self, var, lineno=None, col=None): + """Looks up a xonsh variable by name.""" + xenv = self._xenv(lineno=lineno, col=col) + idx = ast.Index(value=ast.Str(s=var, lineno=lineno, col_offset=col)) + return ast.Subscript(value=xenv, + slice=idx, + ctx=ast.Load(), + lineno=lineno, + col_offset=col) + + def _subproc_cliargs(self, args, lineno=None, col=None): + """Creates an expression for subprocess CLI arguments.""" + cliargs = currlist = empty_list(lineno=lineno, col=col) + for arg in args: + action = arg._cliarg_action + if action == 'append': + if currlist is None: + currlist = empty_list(lineno=lineno, col=col) + cliargs = binop(cliargs, ast.Add(), currlist, + lineno=lineno, + col=col) + currlist.elts.append(arg) + elif action == 'extend': + cliargs = binop(cliargs, ast.Add(), arg, + lineno=lineno, + col=col) + currlist = None + elif action == 'splitlines': + sl = call_split_lines(arg, lineno=lineno, col=col) + cliargs = binop(cliargs, ast.Add(), sl, lineno=lineno, col=col) + currlist = None + elif action == 'ensure_list': + x = ensure_list_from_str_or_list(arg, lineno=lineno, col=col) + cliargs = binop(cliargs, ast.Add(), x, lineno=lineno, col=col) + currlist = None + else: + raise ValueError("action not understood: " + action) + del arg._cliarg_action + return cliargs + + def p_pipe(self, p): + """pipe : PIPE + | WS PIPE + | PIPE WS + | WS PIPE WS + """ + p1 = p[1] + if len(p) > 2 and len(p1.strip()) == 0: + p1 = p[2] + p[0] = ast.Str(s=p1, lineno=self.lineno, col_offset=self.col) + + def p_subproc(self, p): + """subproc : subproc_atoms + | subproc_atoms WS + | subproc AMPERSAND + | subproc pipe subproc_atoms + | subproc pipe subproc_atoms WS + """ + lineno = self.lineno + col = self.col + lenp = len(p) + p1 = p[1] + if lenp == 2: + p0 = [self._subproc_cliargs(p1, lineno=lineno, col=col)] + elif p[2] == '&': + p0 = p1 + [ast.Str(s=p[2], lineno=lineno, col_offset=col)] + elif lenp == 3: + p0 = [self._subproc_cliargs(p1, lineno=lineno, col=col)] + else: + if len(p1) > 1 and hasattr(p1[-2], 's') and p1[-2].s != '|': + msg = 'additional redirect following non-pipe redirect' + self._parse_error(msg, self.currloc(lineno=lineno, column=col)) + cliargs = self._subproc_cliargs(p[3], lineno=lineno, col=col) + p0 = p1 + [p[2], cliargs] + # return arguments list + p[0] = p0 + + def p_subproc_atoms(self, p): + """subproc_atoms : subproc_atom + | subproc_atoms WS subproc_atom + """ + p1 = p[1] + if len(p) < 4: + p1 = [p1] + else: + p1.append(p[3]) + p[0] = p1 + + def p_subproc_atom(self, p): + """subproc_atom : subproc_arg + | string_literal + | REGEXPATH + | DOLLAR_NAME + | GT + | LT + | RSHIFT + | IOREDIRECT + | AT_LPAREN test RPAREN + | DOLLAR_LBRACE test RBRACE + | DOLLAR_LPAREN subproc RPAREN + | DOLLAR_LBRACKET subproc 
RBRACKET + """ + lenp = len(p) + p1 = p[1] + if lenp == 2: + if isinstance(p1, str): + p0 = ast.Str(s=p1, lineno=self.lineno, col_offset=self.col) + bt = '`' + if p1.startswith(bt) and p1.endswith(bt): + p0.s = p1.strip(bt) + p0 = xonsh_regexpath(p0, lineno=self.lineno, col=self.col) + p0._cliarg_action = 'extend' + elif '*' in p1: + p0 = xonsh_call('__xonsh_glob__', + args=[p0], + lineno=self.lineno, + col=self.col) + p0._cliarg_action = 'extend' + elif p1.startswith('$'): + p0 = self._envvar_by_name(p1[1:], + lineno=self.lineno, + col=self.col) + p0 = xonsh_call('__xonsh_ensure_list_of_strs__', [p0], + lineno=self.lineno, + col=self.col) + p0._cliarg_action = 'extend' + else: + p0.s = os.path.expanduser(p0.s) + p0._cliarg_action = 'append' + elif isinstance(p1, ast.AST): + p0 = p1 + p0._cliarg_action = 'append' + else: + assert False + elif p1 == '@(': + p0 = xonsh_call('__xonsh_ensure_list_of_strs__', [p[2]], + lineno=self.lineno, + col=self.col) + p0._cliarg_action = 'extend' + elif p1 == '${': + xenv = self._xenv(lineno=self.lineno, col=self.col) + idx = ast.Index(value=p[2]) + p0 = ast.Subscript(value=xenv, + slice=idx, + ctx=ast.Load(), + lineno=self.lineno, + col_offset=self.col) + p0._cliarg_action = 'append' + elif p1 == '$(': + p0 = xonsh_call('__xonsh_subproc_captured__', + args=p[2], + lineno=self.lineno, + col=self.col) + p0._cliarg_action = 'splitlines' + elif p1 == '$[': + p0 = xonsh_call('__xonsh_subproc_uncaptured__', + args=p[2], + lineno=self.lineno, + col=self.col) + p0._cliarg_action = 'splitlines' + else: + assert False + p[0] = p0 + + def p_subproc_arg(self, p): + """subproc_arg : subproc_arg_part + | subproc_arg subproc_arg_part + """ + # This glues the string together after parsing + p1 = p[1] + if len(p) == 2: + p0 = p1 + else: + p0 = p1 + p[2] + p[0] = p0 + + def p_subproc_arg_part(self, p): + """subproc_arg_part : NAME + | TILDE + | PERIOD + | DIVIDE + | MINUS + | PLUS + | COLON + | AT + | EQUALS + | TIMES + | POW + | MOD + | XOR + | DOUBLEDIV + | ELLIPSIS + | NONE + | TRUE + | FALSE + | NUMBER + | STRING + """ + # Many tokens cannot be part of this list, such as $, ', ", () + # Use a string atom instead. 
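+        # Roughly: an argument such as ./foo-bar.txt arrives here as the
+        # parts PERIOD DIVIDE NAME MINUS NAME PERIOD NAME, which
+        # p_subproc_arg above concatenates back into a single string.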
+ p[0] = p[1] + + # + # Helpers + # + + def p_empty(self, p): + 'empty : ' + p[0] = None + + def p_error(self, p): + if p is None: + self._parse_error('no further code', None) + elif p.type == 'ERRORTOKEN': + if isinstance(p.value, BaseException): + raise p.value + else: + self._parse_error(p.value, self.currloc(lineno=p.lineno, + column=p.lexpos)) + else: + msg = 'code: {0}'.format(p.value) + self._parse_error(msg, self.currloc(lineno=p.lineno, + column=p.lexpos)) diff --git a/xonsh/parsers/__init__.py b/xonsh/parsers/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/xonsh/parsers/base.py b/xonsh/parsers/base.py deleted file mode 100644 index 9a8ca15..0000000 --- a/xonsh/parsers/base.py +++ /dev/null @@ -1,3292 +0,0 @@ -# -*- coding: utf-8 -*- -"""Implements the base xonsh parser.""" -import os -import re -import time -import textwrap -from threading import Thread -from ast import parse as pyparse -from collections.abc import Iterable, Sequence, Mapping - -from xonsh.ply.ply import yacc - -from xonsh.tools import FORMATTER -from xonsh import ast -from xonsh.ast import has_elts, xonsh_call, load_attribute_chain -from xonsh.lexer import Lexer, LexToken -from xonsh.platform import PYTHON_VERSION_INFO -from xonsh.tokenize import SearchPath, StringPrefix -from xonsh.lazyasd import LazyObject, lazyobject -from xonsh.parsers.context_check import check_contexts - - -RE_SEARCHPATH = LazyObject(lambda: re.compile(SearchPath), globals(), "RE_SEARCHPATH") -RE_STRINGPREFIX = LazyObject( - lambda: re.compile(StringPrefix), globals(), "RE_STRINGPREFIX" -) - - -@lazyobject -def RE_FSTR_EVAL_CHARS(): - return re.compile(".*?[!@$`]") - - -class Location(object): - """Location in a file.""" - - def __init__(self, fname, lineno, column=None): - """Takes a filename, line number, and optionally a column number.""" - self.fname = fname - self.lineno = lineno - self.column = column - - def __str__(self): - s = "{0}:{1}".format(self.fname, self.lineno) - if self.column is not None: - s += ":{0}".format(self.column) - return s - - -def ensure_has_elts(x, lineno=None, col_offset=None): - """Ensures that x is an AST node with elements.""" - if not has_elts(x): - if not isinstance(x, Iterable): - x = [x] - lineno = x[0].lineno if lineno is None else lineno - col_offset = x[0].col_offset if col_offset is None else col_offset - x = ast.Tuple(elts=x, ctx=ast.Load(), lineno=lineno, col_offset=col_offset) - return x - - -def empty_list(lineno=None, col=None): - """Creates the AST node for an empty list.""" - return ast.List(elts=[], ctx=ast.Load(), lineno=lineno, col_offset=col) - - -def binop(x, op, y, lineno=None, col=None): - """Creates the AST node for a binary operation.""" - lineno = x.lineno if lineno is None else lineno - col = x.col_offset if col is None else col - return ast.BinOp(left=x, op=op, right=y, lineno=lineno, col_offset=col) - - -def call_split_lines(x, lineno=None, col=None): - """Creates the AST node for calling the 'splitlines' attribute of an - object, nominally a string. - """ - return ast.Call( - func=ast.Attribute( - value=x, attr="splitlines", ctx=ast.Load(), lineno=lineno, col_offset=col - ), - args=[], - keywords=[], - starargs=None, - kwargs=None, - lineno=lineno, - col_offset=col, - ) - - -def ensure_list_from_str_or_list(x, lineno=None, col=None): - """Creates the AST node for the following expression:: - - [x] if isinstance(x, str) else x - - Somewhat useful. 
- """ - return ast.IfExp( - test=ast.Call( - func=ast.Name( - id="isinstance", ctx=ast.Load(), lineno=lineno, col_offset=col - ), - args=[x, ast.Name(id="str", ctx=ast.Load(), lineno=lineno, col_offset=col)], - keywords=[], - starargs=None, - kwargs=None, - lineno=lineno, - col_offset=col, - ), - body=ast.List(elts=[x], ctx=ast.Load(), lineno=lineno, col_offset=col), - orelse=x, - lineno=lineno, - col_offset=col, - ) - - -def xonsh_help(x, lineno=None, col=None): - """Creates the AST node for calling the __xonsh__.help() function.""" - return xonsh_call("__xonsh__.help", [x], lineno=lineno, col=col) - - -def xonsh_superhelp(x, lineno=None, col=None): - """Creates the AST node for calling the __xonsh__.superhelp() function.""" - return xonsh_call("__xonsh__.superhelp", [x], lineno=lineno, col=col) - - -def xonsh_pathsearch(pattern, pymode=False, lineno=None, col=None): - """Creates the AST node for calling the __xonsh__.pathsearch() function. - The pymode argument indicate if it is called from subproc or python mode""" - pymode = ast.NameConstant(value=pymode, lineno=lineno, col_offset=col) - searchfunc, pattern = RE_SEARCHPATH.match(pattern).groups() - pattern = ast.Str(s=pattern, lineno=lineno, col_offset=col) - pathobj = False - if searchfunc.startswith("@"): - func = searchfunc[1:] - elif "g" in searchfunc: - func = "__xonsh__.globsearch" - pathobj = "p" in searchfunc - else: - func = "__xonsh__.regexsearch" - pathobj = "p" in searchfunc - func = load_attribute_chain(func, lineno=lineno, col=col) - pathobj = ast.NameConstant(value=pathobj, lineno=lineno, col_offset=col) - return xonsh_call( - "__xonsh__.pathsearch", - args=[func, pattern, pymode, pathobj], - lineno=lineno, - col=col, - ) - - -def load_ctx(x): - """Recursively sets ctx to ast.Load()""" - if not hasattr(x, "ctx"): - return - x.ctx = ast.Load() - if isinstance(x, (ast.Tuple, ast.List)): - for e in x.elts: - load_ctx(e) - elif isinstance(x, ast.Starred): - load_ctx(x.value) - - -def store_ctx(x): - """Recursively sets ctx to ast.Store()""" - if not hasattr(x, "ctx"): - return - x.ctx = ast.Store() - if isinstance(x, (ast.Tuple, ast.List)): - for e in x.elts: - store_ctx(e) - elif isinstance(x, ast.Starred): - store_ctx(x.value) - - -def del_ctx(x): - """Recursively sets ctx to ast.Del()""" - if not hasattr(x, "ctx"): - return - x.ctx = ast.Del() - if isinstance(x, (ast.Tuple, ast.List)): - for e in x.elts: - del_ctx(e) - elif isinstance(x, ast.Starred): - del_ctx(x.value) - - -def empty_list_if_newline(x): - return [] if x == "\n" else x - - -def lopen_loc(x): - """Extracts the line and column number for a node that may have an opening - parenthesis, brace, or bracket. - """ - lineno = x._lopen_lineno if hasattr(x, "_lopen_lineno") else x.lineno - col = x._lopen_col if hasattr(x, "_lopen_col") else x.col_offset - return lineno, col - - -def hasglobstar(x): - """Returns True if a node has literal '*' for globbing.""" - if isinstance(x, ast.Str): - return "*" in x.s - elif isinstance(x, list): - for e in x: - if hasglobstar(e): - return True - else: - return False - else: - return False - - -def _wrap_fstr_field(field, spec, conv): - rtn = "{" + field - if conv: - rtn += "!" + conv - if spec: - rtn += ":" + spec - rtn += "}" - return rtn - - -def eval_fstr_fields(fstring, prefix, filename=None): - """Takes an fstring (and its prefix, ie f") that may contain - xonsh expressions as its field values and - substitues them for a xonsh eval() call as needed. 
Roughly, - for example, this will take f"{$HOME}" and transform it to - be f"{__xonsh__.execer.eval(r'$HOME')}". - """ - last = fstring[-1] - q, r = ("'", r"\'") if last == '"' else ('"', r"\"") - prelen = len(prefix) - postlen = len(fstring) - len(fstring.rstrip(last)) - template = fstring[prelen:-postlen] - repl = prefix - for literal, field, spec, conv in FORMATTER.parse(template): - repl += literal - if field is None: - continue - elif RE_FSTR_EVAL_CHARS.match(field) is None: - # just a normal python field, simply reconstruct. - repl += _wrap_fstr_field(field, spec, conv) - else: - # the field has a special xonsh character, so we must eval it - eval_field = "__xonsh__.execer.eval(r" + q - eval_field += field.lstrip().replace(q, r) - eval_field += q + ", glbs=globals(), locs=locals()" - if filename is not None: - eval_field += ", filename=" + q + filename + q - eval_field += ")" - repl += _wrap_fstr_field(eval_field, spec, conv) - repl += last * postlen - return repl - - -class YaccLoader(Thread): - """Thread to load (but not shave) the yacc parser.""" - - def __init__(self, parser, yacc_kwargs, *args, **kwargs): - super().__init__(*args, **kwargs) - self.daemon = True - self.parser = parser - self.yacc_kwargs = yacc_kwargs - self.start() - - def run(self): - self.parser.parser = yacc.yacc(**self.yacc_kwargs) - - -class BaseParser(object): - """A base class that parses the xonsh language.""" - - def __init__( - self, - lexer_optimize=True, - lexer_table="xonsh.lexer_table", - yacc_optimize=True, - yacc_table="xonsh.parser_table", - yacc_debug=False, - outputdir=None, - ): - """Parameters - ---------- - lexer_optimize : bool, optional - Set to false when unstable and true when lexer is stable. - lexer_table : str, optional - Lexer module used when optimized. - yacc_optimize : bool, optional - Set to false when unstable and true when parser is stable. - yacc_table : str, optional - Parser module used when optimized. - yacc_debug : debug, optional - Dumps extra debug info. - outputdir : str or None, optional - The directory to place generated tables within. Defaults to the root - xonsh dir. 
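-
-        A rough usage sketch, assuming some concrete subclass (here called
-        SomeParser, a placeholder name)::
-
-            parser = SomeParser()
-            tree = parser.parse("ls -l\n", filename="<test>")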
- """ - self.lexer = lexer = Lexer() - self.tokens = lexer.tokens - - self._lines = None - self.xonsh_code = None - self._attach_nocomma_tok_rules() - self._attach_nocloser_base_rules() - self._attach_nodedent_base_rules() - self._attach_nonewline_base_rules() - self._attach_subproc_arg_part_rules() - - opt_rules = [ - "newlines", - "arglist", - "func_call", - "rarrow_test", - "typedargslist", - "equals_test", - "colon_test", - "tfpdef", - "comma_tfpdef_list", - "comma_pow_tfpdef", - "vfpdef", - "comma_vfpdef_list", - "comma_pow_vfpdef", - "equals_yield_expr_or_testlist_list", - "testlist", - "as_name", - "period_or_ellipsis_list", - "comma_import_as_name_list", - "comma_dotted_as_name_list", - "comma_name_list", - "comma_test", - "elif_part_list", - "finally_part", - "varargslist", - "or_and_test_list", - "and_not_test_list", - "comp_op_expr_list", - "xor_and_expr_list", - "ampersand_shift_expr_list", - "shift_arith_expr_list", - "op_factor_list", - "trailer_list", - "testlist_comp", - "yield_expr_or_testlist_comp", - "dictorsetmaker", - "comma_subscript_list", - "test", - "sliceop", - "comp_iter", - "yield_arg", - "test_comma_list", - "macroarglist", - "any_raw_toks", - ] - for rule in opt_rules: - self._opt_rule(rule) - - list_rules = [ - "comma_tfpdef", - "comma_vfpdef", - "semi_small_stmt", - "comma_test_or_star_expr", - "period_or_ellipsis", - "comma_import_as_name", - "comma_dotted_as_name", - "period_name", - "comma_name", - "elif_part", - "except_part", - "comma_with_item", - "or_and_test", - "and_not_test", - "comp_op_expr", - "pipe_xor_expr", - "xor_and_expr", - "ampersand_shift_expr", - "shift_arith_expr", - "pm_term", - "op_factor", - "trailer", - "comma_subscript", - "comma_expr_or_star_expr", - "comma_test", - "comma_argument", - "comma_item", - "attr_period_name", - "test_comma", - "equals_yield_expr_or_testlist", - "comma_nocomma", - ] - for rule in list_rules: - self._list_rule(rule) - - tok_rules = [ - "def", - "class", - "return", - "number", - "name", - "bang", - "none", - "true", - "false", - "ellipsis", - "if", - "del", - "assert", - "lparen", - "lbrace", - "lbracket", - "string", - "times", - "plus", - "minus", - "divide", - "doublediv", - "mod", - "at", - "lshift", - "rshift", - "pipe", - "xor", - "ampersand", - "for", - "colon", - "import", - "except", - "nonlocal", - "global", - "yield", - "from", - "raise", - "with", - "dollar_lparen", - "dollar_lbrace", - "dollar_lbracket", - "try", - "bang_lparen", - "bang_lbracket", - "comma", - "rparen", - "rbracket", - "at_lparen", - "atdollar_lparen", - "indent", - "dedent", - "newline", - "lambda", - "ampersandequal", - "as", - "atdollar", - "atequal", - "break", - "continue", - "divequal", - "dollar_name", - "double_question", - "doubledivequal", - "elif", - "else", - "eq", - "equals", - "errortoken", - "finally", - "ge", - "in", - "is", - "le", - "lshiftequal", - "minusequal", - "modequal", - "ne", - "pass", - "period", - "pipeequal", - "plusequal", - "pow", - "powequal", - "question", - "rarrow", - "rshiftequal", - "semi", - "tilde", - "timesequal", - "while", - "xorequal", - ] - for rule in tok_rules: - self._tok_rule(rule) - - yacc_kwargs = dict( - module=self, - debug=yacc_debug, - start="start_symbols", - optimize=yacc_optimize, - tabmodule=yacc_table, - ) - if not yacc_debug: - yacc_kwargs["errorlog"] = yacc.NullLogger() - if outputdir is None: - outputdir = os.path.dirname(os.path.dirname(__file__)) - yacc_kwargs["outputdir"] = outputdir - if yacc_debug: - # create parser on main thread - self.parser = 
yacc.yacc(**yacc_kwargs) - else: - self.parser = None - YaccLoader(self, yacc_kwargs) - - # Keeps track of the last token given to yacc (the lookahead token) - self._last_yielded_token = None - - def reset(self): - """Resets for clean parsing.""" - self.lexer.reset() - self._last_yielded_token = None - self._lines = None - self.xonsh_code = None - - def parse(self, s, filename="", mode="exec", debug_level=0): - """Returns an abstract syntax tree of xonsh code. - - Parameters - ---------- - s : str - The xonsh code. - filename : str, optional - Name of the file. - mode : str, optional - Execution mode, one of: exec, eval, or single. - debug_level : str, optional - Debugging level passed down to yacc. - - Returns - ------- - tree : AST - """ - self.reset() - self.xonsh_code = s - self.lexer.fname = filename - while self.parser is None: - time.sleep(0.01) # block until the parser is ready - tree = self.parser.parse(input=s, lexer=self.lexer, debug=debug_level) - if tree is not None: - check_contexts(tree) - # hack for getting modes right - if mode == "single": - if isinstance(tree, ast.Expression): - tree = ast.Interactive(body=[self.expr(tree.body)]) - elif isinstance(tree, ast.Module): - tree = ast.Interactive(body=tree.body) - return tree - - def _lexer_errfunc(self, msg, line, column): - self._parse_error(msg, self.currloc(line, column)) - - def _yacc_lookahead_token(self): - """Gets the next-to-last and last token seen by the lexer.""" - return self.lexer.beforelast, self.lexer.last - - def _opt_rule(self, rulename): - """For a rule name, creates an associated optional rule. - '_opt' is appended to the rule name. - """ - - def optfunc(self, p): - p[0] = p[1] - - optfunc.__doc__ = ("{0}_opt : empty\n" " | {0}").format(rulename) - optfunc.__name__ = "p_" + rulename + "_opt" - setattr(self.__class__, optfunc.__name__, optfunc) - - def _list_rule(self, rulename): - """For a rule name, creates an associated list rule. - '_list' is appended to the rule name. - """ - - def listfunc(self, p): - p[0] = p[1] if len(p) == 2 else p[1] + p[2] - - listfunc.__doc__ = ("{0}_list : {0}\n" " | {0}_list {0}").format( - rulename - ) - listfunc.__name__ = "p_" + rulename + "_list" - setattr(self.__class__, listfunc.__name__, listfunc) - - def _tok_rule(self, rulename): - """For a rule name, creates a rule that returns the corresponding token. - '_tok' is appended to the rule name. 
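-
-        For example, _tok_rule("def") attaches a method named p_def_tok
-        whose docstring is the grammar rule "def_tok : DEF" and whose body
-        returns the matching lookahead token.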
- """ - - def tokfunc(self, p): - s, t = self._yacc_lookahead_token() - uprule = rulename.upper() - if s is not None and s.type == uprule: - p[0] = s - elif t is not None and t.type == uprule: - p[0] = t - else: - raise TypeError("token for {0!r} not found.".format(rulename)) - - tokfunc.__doc__ = "{0}_tok : {1}".format(rulename, rulename.upper()) - tokfunc.__name__ = "p_" + rulename + "_tok" - setattr(self.__class__, tokfunc.__name__, tokfunc) - - def currloc(self, lineno, column=None): - """Returns the current location.""" - return Location(fname=self.lexer.fname, lineno=lineno, column=column) - - def expr(self, p): - """Creates an expression for a token.""" - expr = ast.Expr(value=p, lineno=p.lineno, col_offset=p.col_offset) - expr.max_lineno = self.lineno - expr.max_col = self.col - return expr - - def token_col(self, t): - """Gets ths token column""" - return t.lexpos - - @property - def lineno(self): - if self.lexer.last is None: - return 1 - else: - return self.lexer.last.lineno - - @property - def col(self): - s, t = self._yacc_lookahead_token() - if t is not None: - if t.type == "NEWLINE": - t = s - return self.token_col(t) - return 0 - - @property - def lines(self): - if self._lines is None and self.xonsh_code is not None: - self._lines = self.xonsh_code.splitlines(keepends=True) - return self._lines - - def source_slice(self, start, stop): - """Gets the original source code from two (line, col) tuples in - source-space (i.e. lineno start at 1). - """ - bline, bcol = start - eline, ecol = stop - bline -= 1 - lines = self.lines[bline:eline] - if ecol == 0: - explen = eline - bline - if explen == len(lines) and explen > 1: - lines[-1] = "" - else: - lines[-1] = lines[-1][:ecol] - lines[0] = lines[0][bcol:] - return "".join(lines) - - def _parse_error(self, msg, loc): - if self.xonsh_code is None or loc is None: - err_line_pointer = "" - else: - col = loc.column + 1 - lines = self.lines - if loc.lineno == 0: - loc.lineno = len(lines) - i = loc.lineno - 1 - if 0 <= i < len(lines): - err_line = lines[i].rstrip() - err_line_pointer = "\n{}\n{: >{}}".format(err_line, "^", col) - else: - err_line_pointer = "" - err = SyntaxError("{0}: {1}{2}".format(loc, msg, err_line_pointer)) - err.loc = loc - raise err - - # - # Precedence of operators - # - precedence = ( - ("left", "PIPE"), - ("left", "XOR"), - ("left", "AMPERSAND"), - ("left", "EQ", "NE"), - ("left", "GT", "GE", "LT", "LE"), - ("left", "RSHIFT", "LSHIFT"), - ("left", "PLUS", "MINUS"), - ("left", "TIMES", "DIVIDE", "DOUBLEDIV", "MOD"), - ("left", "POW"), - ) - - # - # Grammar as defined by BNF - # - - def p_start_symbols(self, p): - """start_symbols : single_input - | file_input - | eval_input - | empty - """ - p[0] = p[1] - - def p_single_input(self, p): - """single_input : compound_stmt NEWLINE - """ - p1 = empty_list_if_newline(p[1]) - p0 = ast.Interactive(body=p1) - p[0] = p0 - - def p_file_input(self, p): - """file_input : file_stmts""" - p[0] = ast.Module(body=p[1]) - - def p_file_stmts_nl(self, p): - """file_stmts : newline_or_stmt""" - # newline_or_stmt ENDMARKER - p[0] = empty_list_if_newline(p[1]) - - def p_file_stmts_files(self, p): - """file_stmts : file_stmts newline_or_stmt""" - # file_input newline_or_stmt ENDMARKER - p2 = empty_list_if_newline(p[2]) - p[0] = p[1] + p2 - - def p_newline_or_stmt(self, p): - """newline_or_stmt : NEWLINE - | stmt - """ - p[0] = p[1] - - def p_newlines(self, p): - """newlines : NEWLINE - | newlines NEWLINE - """ - p[0] = p[1] if len(p) == 2 else p[1] + p[2] - - def p_eval_input(self, p): 
- """eval_input : testlist newlines_opt - """ - p1 = p[1] - p[0] = ast.Expression(body=p1, lineno=p1.lineno, col_offset=p1.col_offset) - - def p_func_call(self, p): - """func_call : LPAREN arglist_opt RPAREN""" - p[0] = p[2] - - def p_attr_period_name(self, p): - """attr_period_name : PERIOD NAME""" - p[0] = [p[2]] - - def p_attr_name_alone(self, p): - """attr_name : name_tok""" - p1 = p[1] - p[0] = ast.Name( - id=p1.value, ctx=ast.Load(), lineno=p1.lineno, col_offset=p1.lexpos - ) - - def p_attr_name_with(self, p): - """attr_name : name_tok attr_period_name_list""" - p1 = p[1] - name = ast.Name( - id=p1.value, ctx=ast.Load(), lineno=p1.lineno, col_offset=p1.lexpos - ) - p2 = p[2] - p0 = ast.Attribute( - value=name, - attr=p2[0], - ctx=ast.Load(), - lineno=p1.lineno, - col_offset=p1.lexpos, - ) - for a in p2[1:]: - p0 = ast.Attribute( - value=p0, - attr=a, - ctx=ast.Load(), - lineno=p0.lineno, - col_offset=p0.col_offset, - ) - p[0] = p0 - - def p_decorator_no_call(self, p): - """decorator : at_tok attr_name NEWLINE""" - p[0] = p[2] - - def p_decorator_call(self, p): - """decorator : at_tok attr_name func_call NEWLINE""" - p1, name, p3 = p[1], p[2], p[3] - if isinstance(name, ast.Attribute) or (p3 is not None): - lineno, col = name.lineno, name.col_offset - else: - lineno, col = p1.lineno, p1.lexpos - if p3 is None: - p0 = ast.Call( - func=name, - args=[], - keywords=[], - starargs=None, - kwargs=None, - lineno=lineno, - col_offset=col, - ) - else: - p0 = ast.Call(func=name, lineno=lineno, col_offset=col, **p3) - p[0] = p0 - - def p_decorators(self, p): - """decorators : decorator - | decorators decorator - """ - p[0] = [p[1]] if len(p) == 2 else p[1] + [p[2]] - - def p_decorated(self, p): - """decorated : decorators classdef_or_funcdef""" - p1, p2 = p[1], p[2] - targ = p2[0] - targ.decorator_list = p1 - # this is silly, CPython. This claims a func or class starts on - # the line of the first decorator, rather than the 'def' or 'class' - # line. However, it retains the original col_offset. - targ.lineno = p1[0].lineno - # async functions take the col number of the 'def', unless they are - # decorated, in which case they have the col of the 'async'. WAT? 
- if hasattr(targ, "_async_tok"): - targ.col_offset = targ._async_tok.lexpos - del targ._async_tok - p[0] = p2 - - def p_rarrow_test(self, p): - """rarrow_test : RARROW test""" - p[0] = p[2] - - def p_funcdef(self, p): - """funcdef : def_tok NAME parameters rarrow_test_opt COLON suite""" - f = ast.FunctionDef( - name=p[2], - args=p[3], - returns=p[4], - body=p[6], - decorator_list=[], - lineno=p[1].lineno, - col_offset=p[1].lexpos, - ) - p[0] = [f] - - def p_parameters(self, p): - """parameters : LPAREN typedargslist_opt RPAREN""" - p2 = p[2] - if p2 is None: - p2 = ast.arguments( - args=[], - vararg=None, - kwonlyargs=[], - kw_defaults=[], - kwarg=None, - defaults=[], - ) - p[0] = p2 - - def p_equals_test(self, p): - """equals_test : EQUALS test""" - p[0] = p[2] - - def p_typedargslist_kwarg(self, p): - """typedargslist : POW tfpdef""" - p[0] = ast.arguments( - args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=p[2], defaults=[] - ) - - def p_typedargslist_times4_tfpdef(self, p): - """typedargslist : TIMES tfpdef comma_pow_tfpdef_opt""" - # *args, **kwargs - p0 = ast.arguments( - args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=p[3], defaults=[] - ) - self._set_var_args(p0, p[2], None) - p[0] = p0 - - def p_typedargslist_times4_comma(self, p): - """typedargslist : TIMES comma_pow_tfpdef""" - # *, **kwargs - p0 = ast.arguments( - args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=p[2], defaults=[] - ) - p[0] = p0 - - def p_typedargslist_times5_tdpdef(self, p): - """typedargslist : TIMES tfpdef comma_tfpdef_list comma_pow_tfpdef_opt""" - # *args, x, **kwargs - p0 = ast.arguments( - args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=p[4], defaults=[] - ) - self._set_var_args(p0, p[2], p[3]) # *args - p[0] = p0 - - def p_typedargslist_times5_comma(self, p): - """typedargslist : TIMES comma_tfpdef_list comma_pow_tfpdef_opt""" - # *, x, **kwargs - p0 = ast.arguments( - args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=p[3], defaults=[] - ) - self._set_var_args(p0, None, p[2]) # *args - p[0] = p0 - - def p_typedargslist_t5(self, p): - """typedargslist : tfpdef equals_test_opt comma_tfpdef_list_opt comma_opt""" - # x - p0 = ast.arguments( - args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[] - ) - self._set_regular_args(p0, p[1], p[2], p[3], p[4]) - p[0] = p0 - - def p_typedargslist_t7(self, p): - """typedargslist : tfpdef equals_test_opt comma_tfpdef_list_opt comma_opt POW tfpdef""" - # x, **kwargs - p0 = ast.arguments( - args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=p[6], defaults=[] - ) - self._set_regular_args(p0, p[1], p[2], p[3], p[4]) - p[0] = p0 - - def p_typedargslist_t8(self, p): - """typedargslist : tfpdef equals_test_opt comma_tfpdef_list_opt comma_opt TIMES tfpdef_opt comma_tfpdef_list_opt""" - p0 = ast.arguments( - args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[] - ) - self._set_regular_args(p0, p[1], p[2], p[3], p[4]) - self._set_var_args(p0, p[6], p[7]) - p[0] = p0 - - def p_typedargslist_t10(self, p): - """typedargslist : tfpdef equals_test_opt comma_tfpdef_list_opt comma_opt TIMES tfpdef_opt COMMA POW vfpdef""" - # x, *args, **kwargs - p0 = ast.arguments( - args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=p[9], defaults=[] - ) - self._set_regular_args(p0, p[1], p[2], p[3], p[4]) - self._set_var_args(p0, p[6], None) - p[0] = p0 - - def p_typedargslist_t11(self, p): - """typedargslist : tfpdef equals_test_opt comma_tfpdef_list_opt comma_opt TIMES tfpdef_opt 
comma_tfpdef_list COMMA POW tfpdef""" - # x, *args, **kwargs - p0 = ast.arguments( - args=[], - vararg=None, - kwonlyargs=[], - kw_defaults=[], - kwarg=p[10], - defaults=[], - ) - self._set_regular_args(p0, p[1], p[2], p[3], p[4]) - self._set_var_args(p0, p[6], p[7]) - p[0] = p0 - - def p_colon_test(self, p): - """colon_test : COLON test""" - p[0] = p[2] - - def p_tfpdef(self, p): - """tfpdef : name_tok colon_test_opt""" - p1 = p[1] - kwargs = {"arg": p1.value, "annotation": p[2]} - if PYTHON_VERSION_INFO >= (3, 5, 1): - kwargs.update({"lineno": p1.lineno, "col_offset": p1.lexpos}) - p[0] = ast.arg(**kwargs) - - def p_comma_tfpdef_empty(self, p): - """comma_tfpdef : COMMA""" - p[0] = [] - - def p_comma_tfpdef_args(self, p): - """comma_tfpdef : COMMA tfpdef equals_test_opt""" - p[0] = [{"arg": p[2], "default": p[3]}] - - def p_comma_pow_tfpdef(self, p): - """comma_pow_tfpdef : COMMA POW tfpdef""" - p[0] = p[3] - - def _set_args_def(self, argmts, vals, kwargs=False): - args, defs = ( - (argmts.kwonlyargs, argmts.kw_defaults) - if kwargs - else (argmts.args, argmts.defaults) - ) - if vals is None and kwargs: - loc = self.currloc(self.lineno, self.col) - self._parse_error("named arguments must follow bare *", loc) - for v in vals: - args.append(v["arg"]) - d = v["default"] - if kwargs or (d is not None): - defs.append(d) - - def _set_regular_args(self, p0, p1, p2, p3, p4): - if p2 is None and p3 is None: - # x - p0.args.append(p1) - elif p2 is not None and p3 is None: - # x=42 - p0.args.append(p1) - p0.defaults.append(p2) - elif p2 is None and p3 is not None: - # x, y and x, y=42 - p0.args.append(p1) - self._set_args_def(p0, p3) - else: - # x=42, y=42 - p0.args.append(p1) - p0.defaults.append(p2) - self._set_args_def(p0, p3) - - def _set_var_args(self, p0, vararg, kwargs): - if vararg is None and kwargs is not None: - self._set_args_def(p0, kwargs, kwargs=True) - elif vararg is not None and kwargs is None: - # *args - p0.vararg = vararg - elif vararg is not None and kwargs is not None: - # *args, x and *args, x, y and *args, x=10 and *args, x=10, y - # and *args, x, y=10, and *args, x=42, y=65 - p0.vararg = vararg - self._set_args_def(p0, kwargs, kwargs=True) - else: - assert False - - def p_varargslist_kwargs(self, p): - """varargslist : POW vfpdef""" - p[0] = ast.arguments( - args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=p[2], defaults=[] - ) - - def p_varargslist_times4(self, p): - """varargslist : TIMES vfpdef_opt comma_pow_vfpdef_opt""" - p0 = ast.arguments( - args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=p[3], defaults=[] - ) - self._set_var_args(p0, p[2], None) - p[0] = p0 - - def p_varargslist_times5(self, p): - """varargslist : TIMES vfpdef_opt comma_vfpdef_list comma_pow_vfpdef_opt""" - # *args, x, **kwargs - p0 = ast.arguments( - args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=p[4], defaults=[] - ) - self._set_var_args(p0, p[2], p[3]) # *args - p[0] = p0 - - def p_varargslist_v5(self, p): - """varargslist : vfpdef equals_test_opt comma_vfpdef_list_opt comma_opt""" - # x - p0 = ast.arguments( - args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[] - ) - self._set_regular_args(p0, p[1], p[2], p[3], p[4]) - p[0] = p0 - - def p_varargslist_v7(self, p): - """varargslist : vfpdef equals_test_opt comma_vfpdef_list_opt comma_opt POW vfpdef""" - # x, **kwargs - p0 = ast.arguments( - args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=p[6], defaults=[] - ) - self._set_regular_args(p0, p[1], p[2], p[3], p[4]) - p[0] = p0 - - 
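-    # A quick sanity check of the ast.arguments shape this family of
-    # varargslist rules reproduces, assuming only the stdlib ast module
-    # (illustrative, not used at runtime):
-    #
-    #     import ast
-    #     lam = ast.parse("lambda x, y=1, *args, z=2, **kw: 0", mode="eval").body
-    #     a = lam.args
-    #     assert [arg.arg for arg in a.args] == ["x", "y"]    # positional
-    #     assert a.vararg.arg == "args"                       # *args
-    #     assert [arg.arg for arg in a.kwonlyargs] == ["z"]   # keyword-only
-    #     assert a.kwarg.arg == "kw"                          # **kw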
def p_varargslist_v8(self, p): - """varargslist : vfpdef equals_test_opt comma_vfpdef_list_opt comma_opt TIMES vfpdef_opt comma_vfpdef_list_opt""" - # x, *args - p0 = ast.arguments( - args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=None, defaults=[] - ) - self._set_regular_args(p0, p[1], p[2], p[3], p[4]) - self._set_var_args(p0, p[6], p[7]) - p[0] = p0 - - def p_varargslist_v10(self, p): - """varargslist : vfpdef equals_test_opt comma_vfpdef_list_opt comma_opt TIMES vfpdef_opt COMMA POW vfpdef""" - # x, *args, **kwargs - p0 = ast.arguments( - args=[], vararg=None, kwonlyargs=[], kw_defaults=[], kwarg=p[9], defaults=[] - ) - self._set_regular_args(p0, p[1], p[2], p[3], p[4]) - self._set_var_args(p0, p[6], None) - p[0] = p0 - - def p_varargslist_v11(self, p): - """varargslist : vfpdef equals_test_opt comma_vfpdef_list_opt comma_opt TIMES vfpdef_opt comma_vfpdef_list COMMA POW vfpdef""" - p0 = ast.arguments( - args=[], - vararg=None, - kwonlyargs=[], - kw_defaults=[], - kwarg=p[10], - defaults=[], - ) - self._set_regular_args(p0, p[1], p[2], p[3], p[4]) - self._set_var_args(p0, p[6], p[7]) - p[0] = p0 - - def p_vfpdef(self, p): - """vfpdef : name_tok""" - p1 = p[1] - kwargs = {"arg": p1.value, "annotation": None} - if PYTHON_VERSION_INFO >= (3, 5, 1): - kwargs.update({"lineno": p1.lineno, "col_offset": p1.lexpos}) - p[0] = ast.arg(**kwargs) - - def p_comma_vfpdef_empty(self, p): - """comma_vfpdef : COMMA""" - p[0] = [] - - def p_comma_vfpdef_value(self, p): - """comma_vfpdef : COMMA vfpdef equals_test_opt""" - p[0] = [{"arg": p[2], "default": p[3]}] - - def p_comma_pow_vfpdef(self, p): - """comma_pow_vfpdef : COMMA POW vfpdef""" - p[0] = p[3] - - def p_stmt(self, p): - """stmt : simple_stmt - | compound_stmt - """ - p[0] = p[1] - - def p_stmt_list(self, p): - """stmt_list : stmt - | stmt_list stmt - """ - if len(p) == 2: - p[0] = p[1] - else: - p[0] = p[1] + p[2] - - def p_semi_opt(self, p): - """semi_opt : SEMI - | empty - """ - if len(p) == 2: - p[0] = p[1] - - def p_semi_small_stmt(self, p): - """semi_small_stmt : SEMI small_stmt""" - p[0] = [p[2]] - - def p_simple_stmt_single(self, p): - """simple_stmt : small_stmt semi_opt NEWLINE""" - p[0] = [p[1]] - - def p_simple_stmt_many(self, p): - """simple_stmt : small_stmt semi_small_stmt_list semi_opt NEWLINE""" - p[0] = [p[1]] + p[2] - - def p_small_stmt(self, p): - """small_stmt : expr_stmt - | del_stmt - | pass_stmt - | flow_stmt - | import_stmt - | global_stmt - | nonlocal_stmt - | assert_stmt - """ - p[0] = p[1] - - _augassign_op = { - "+=": ast.Add, - "-=": ast.Sub, - "*=": ast.Mult, - "@=": ast.MatMult, - "/=": ast.Div, - "%=": ast.Mod, - "//=": ast.FloorDiv, - "**=": ast.Pow, - "^=": ast.BitXor, - "&=": ast.BitAnd, - "|=": ast.BitOr, - "<<=": ast.LShift, - ">>=": ast.RShift, - } - - def p_expr_stmt_testlist_assign(self, p): - """expr_stmt : testlist_star_expr equals_yield_expr_or_testlist_list_opt - | testlist equals_yield_expr_or_testlist_list_opt - """ - p1, p2 = p[1], p[2] - if isinstance(p1, ast.Tuple): - p1 = [p1] - if p2 is None and len(p1) == 1: - p[0] = self.expr(p1[0]) - elif p2 is None: - assert False - else: - for targ in p1: - store_ctx(targ) - list(map(store_ctx, p2[:-1])) - lineno, col = lopen_loc(p1[0]) - p[0] = ast.Assign( - targets=p1 + p2[:-1], value=p2[-1], lineno=lineno, col_offset=col - ) - - def p_expr_stmt_augassign(self, p): - """expr_stmt : testlist_star_expr augassign yield_expr_or_testlist""" - p1, p2 = p[1], p[2] - if not isinstance(p1, ast.Tuple): - p1 = p1[0] - store_ctx(p1) - op = 
self._augassign_op[p2] - if op is None: - self._parse_error( - "operation {0!r} not supported".format(p2), - self.currloc(lineno=p.lineno, column=p.lexpos), - ) - p[0] = ast.AugAssign( - target=p1, op=op(), value=p[3], lineno=p1.lineno, col_offset=p1.col_offset - ) - - def store_star_expr(self, p1, p2, targs, rhs): - """Stores complex unpacking statements that target *x variables.""" - p1 = [] if p1 is None else p1 - if isinstance(p1, ast.Tuple): - p1 = [p1] - for targ in p1: - store_ctx(targ) - store_ctx(p2) - for targ in targs: - store_ctx(targ) - p1.append(p2) - p1.extend(targs) - p1 = [ - ast.Tuple( - elts=p1, - ctx=ast.Store(), - lineno=p1[0].lineno, - col_offset=p1[0].col_offset, - ) - ] - p0 = ast.Assign( - targets=p1, value=rhs, lineno=p1[0].lineno, col_offset=p1[0].col_offset - ) - return p0 - - def p_expr_stmt_star5(self, p): - """expr_stmt : test_comma_list_opt star_expr comma_test_list equals_yield_expr_or_testlist""" - targs, rhs = p[3], p[4][0] - p[0] = self.store_star_expr(p[1], p[2], targs, rhs) - - def p_expr_stmt_star6(self, p): - """expr_stmt : test_comma_list_opt star_expr comma_opt test_comma_list_opt equals_yield_expr_or_testlist""" - targs, rhs = (p[4] or []), p[5][0] - p[0] = self.store_star_expr(p[1], p[2], targs, rhs) - - def p_test_comma(self, p): - """test_comma : test COMMA""" - p[0] = [p[1]] - - def p_comma_opt(self, p): - """comma_opt : COMMA - | empty - """ - if len(p) == 2: - p[0] = p[1] - - def p_test_or_star_expr(self, p): - """test_or_star_expr : test - | star_expr - """ - p[0] = p[1] - - def p_comma_test_or_star_expr(self, p): - """comma_test_or_star_expr : COMMA test_or_star_expr""" - p[0] = [p[2]] - - def p_testlist_star_expr(self, p): - """testlist_star_expr : test_or_star_expr comma_test_or_star_expr_list comma_opt - | test_or_star_expr comma_opt - """ - p1, p2 = p[1], p[2] - if p2 is None: - p0 = [p1] - elif p2 == ",": - p0 = [ - ast.Tuple( - elts=[p1], - ctx=ast.Load(), - lineno=p1.lineno, - col_offset=p1.col_offset, - ) - ] - else: - p0 = [ - ast.Tuple( - elts=[p1] + p2, - ctx=ast.Load(), - lineno=p1.lineno, - col_offset=p1.col_offset, - ) - ] - p[0] = p0 - - def p_augassign(self, p): - """augassign : PLUSEQUAL - | MINUSEQUAL - | TIMESEQUAL - | ATEQUAL - | DIVEQUAL - | MODEQUAL - | AMPERSANDEQUAL - | PIPEEQUAL - | XOREQUAL - | LSHIFTEQUAL - | RSHIFTEQUAL - | POWEQUAL - | DOUBLEDIVEQUAL - """ - p[0] = p[1] - - def p_yield_expr_or_testlist(self, p): - """yield_expr_or_testlist : yield_expr - | testlist - """ - p[0] = p[1] - - def p_equals_yield_expr_or_testlist(self, p): - """equals_yield_expr_or_testlist : EQUALS yield_expr_or_testlist""" - p[0] = [p[2]] - - # - # For normal assignments, additional restrictions enforced - # by the interpreter - # - def p_del_stmt(self, p): - """del_stmt : del_tok exprlist""" - p1 = p[1] - p2 = p[2] - for targ in p2: - del_ctx(targ) - p0 = ast.Delete( - targets=p2, ctx=ast.Del(), lineno=p1.lineno, col_offset=p1.lexpos - ) - p[0] = p0 - - def p_pass_stmt(self, p): - """pass_stmt : PASS""" - p[0] = ast.Pass(lineno=self.lineno, col_offset=self.col) - - def p_flow_stmt(self, p): - """flow_stmt : break_stmt - | continue_stmt - | return_stmt - | raise_stmt - | yield_stmt - """ - p[0] = p[1] - - def p_break_stmt(self, p): - """break_stmt : BREAK""" - p[0] = ast.Break(lineno=self.lineno, col_offset=self.col) - - def p_continue_stmt(self, p): - """continue_stmt : CONTINUE""" - p[0] = ast.Continue(lineno=self.lineno, col_offset=self.col) - - def p_return_stmt(self, p): - """return_stmt : return_tok testlist_opt""" - p1 = 
p[1] - p[0] = ast.Return(value=p[2], lineno=p1.lineno, col_offset=p1.lexpos) - - def p_yield_stmt(self, p): - """yield_stmt : yield_expr""" - p[0] = self.expr(p[1]) - - def p_raise_stmt_r1(self, p): - """raise_stmt : raise_tok""" - p1 = p[1] - p[0] = ast.Raise(exc=None, cause=None, lineno=p1.lineno, col_offset=p1.lexpos) - - def p_raise_stmt_r2(self, p): - """raise_stmt : raise_tok test""" - p1 = p[1] - p[0] = ast.Raise(exc=p[2], cause=None, lineno=p1.lineno, col_offset=p1.lexpos) - - def p_raise_stmt_r3(self, p): - """raise_stmt : raise_tok test FROM test""" - p1 = p[1] - p[0] = ast.Raise(exc=p[2], cause=p[4], lineno=p1.lineno, col_offset=p1.lexpos) - - def p_import_stmt(self, p): - """import_stmt : import_name - | import_from - """ - p[0] = p[1] - - def p_import_name(self, p): - """import_name : import_tok dotted_as_names - """ - p1 = p[1] - p[0] = ast.Import(names=p[2], lineno=p1.lineno, col_offset=p1.lexpos) - - def p_import_from_pre_f3(self, p): - """import_from_pre : from_tok period_or_ellipsis_list""" - p1 = p[1] - p[0] = (p[2], p1.lineno, p1.lexpos) - - def p_import_from_pre_f4(self, p): - """import_from_pre : from_tok period_or_ellipsis_list_opt dotted_name""" - p1, p2, p3 = p[1], p[2], p[3] - p0 = p3 if p2 is None else p2 + p3 - p[0] = (p0, p1.lineno, p1.lexpos) - - def p_import_from_post_times(self, p): - """import_from_post : TIMES""" - p[0] = [ast.alias(name="*", asname=None)] - - def p_import_from_post_as(self, p): - """import_from_post : import_as_names""" - p[0] = p[1] - - def p_import_from_post_paren(self, p): - """import_from_post : LPAREN import_as_names""" - p[0] = p[2] - - def p_import_from(self, p): - """import_from : import_from_pre IMPORT import_from_post""" - # note below: the ('.' | '...') is necessary because '...' is - # tokenized as ELLIPSIS - p1, lineno, col = p[1] - mod = p1.lstrip(".") - lvl = len(p1) - len(mod) - mod = mod or None - p[0] = ast.ImportFrom( - module=mod, names=p[3], level=lvl, lineno=lineno, col_offset=col - ) - - def p_period_or_ellipsis(self, p): - """period_or_ellipsis : PERIOD - | ELLIPSIS - """ - p[0] = p[1] - - def p_as_name(self, p): - """as_name : AS NAME""" - p[0] = p[2] - - def p_import_as_name(self, p): - """import_as_name : NAME as_name_opt""" - p[0] = ast.alias(name=p[1], asname=p[2]) - - def p_comma_import_as_name(self, p): - """comma_import_as_name : COMMA import_as_name - """ - p[0] = [p[2]] - - def p_comma_import_as_name_tail(self, p): - """comma_import_as_name : comma_opt RPAREN""" - p[0] = list() - - def p_dotted_as_name(self, p): - """dotted_as_name : dotted_name as_name_opt""" - p0 = ast.alias(name=p[1], asname=p[2]) - p[0] = p0 - - def p_comma_dotted_as_name(self, p): - """comma_dotted_as_name : COMMA dotted_as_name""" - p[0] = [p[2]] - - def p_import_as_names(self, p): - """import_as_names : import_as_name comma_import_as_name_list_opt""" - p1, p2 = p[1], p[2] - p0 = [p1] - if p2 is not None: - p0.extend(p2) - p[0] = p0 - - def p_dotted_as_names(self, p): - """dotted_as_names : dotted_as_name comma_dotted_as_name_list_opt""" - p1, p2 = p[1], p[2] - p0 = [p1] - if p2 is not None: - p0.extend(p2) - p[0] = p0 - - def p_period_name(self, p): - """period_name : PERIOD NAME""" - p[0] = p[1] + p[2] - - def p_dotted_name(self, p): - """dotted_name : NAME - | NAME period_name_list - """ - p[0] = p[1] if len(p) == 2 else p[1] + p[2] - - def p_comma_name(self, p): - """comma_name : COMMA NAME""" - p[0] = [p[2]] - - def p_global_stmt(self, p): - """global_stmt : global_tok NAME comma_name_list_opt""" - p1, p2, p3 = p[1], p[2], p[3] 
- names = [p2] - if p3 is not None: - names += p3 - p[0] = ast.Global(names=names, lineno=p1.lineno, col_offset=p1.lexpos) - - def p_nonlocal_stmt(self, p): - """nonlocal_stmt : nonlocal_tok NAME comma_name_list_opt""" - p1, p2, p3 = p[1], p[2], p[3] - names = [p2] - if p3 is not None: - names += p3 - p[0] = ast.Nonlocal(names=names, lineno=p1.lineno, col_offset=p1.lexpos) - - def p_comma_test(self, p): - """comma_test : COMMA test""" - p[0] = [p[2]] - - def p_assert_stmt(self, p): - """assert_stmt : assert_tok test comma_test_opt""" - p1, p2, p3 = p[1], p[2], p[3] - if p3 is not None: - if len(p3) != 1: - assert False - p3 = p3[0] - p[0] = ast.Assert(test=p2, msg=p3, lineno=p1.lineno, col_offset=p1.lexpos) - - def p_compound_stmt(self, p): - """compound_stmt : if_stmt - | while_stmt - | for_stmt - | try_stmt - | with_stmt - | funcdef - | classdef - | decorated - """ - p[0] = p[1] - - def p_elif_part(self, p): - """elif_part : ELIF test COLON suite""" - p2 = p[2] - p[0] = [ - ast.If( - test=p2, - body=p[4], - orelse=[], - lineno=p2.lineno, - col_offset=p2.col_offset, - ) - ] - - def p_else_part(self, p): - """else_part : ELSE COLON suite""" - p[0] = p[3] - - def p_if_stmt(self, p): - """if_stmt : if_tok test COLON suite elif_part_list_opt - | if_tok test COLON suite elif_part_list_opt else_part - """ - p1 = p[1] - lastif = ast.If( - test=p[2], body=p[4], orelse=[], lineno=p1.lineno, col_offset=p1.lexpos - ) - p0 = [lastif] - p5 = p[5] - p6 = p[6] if len(p) > 6 else [] - if p5 is not None: - for elseif in p5: - lastif.orelse.append(elseif) - lastif = elseif - lastif.orelse = p6 - p[0] = p0 - - def p_while_stmt(self, p): - """while_stmt : WHILE test COLON suite - | WHILE test COLON suite else_part - """ - p5 = p[5] if len(p) > 5 else [] - p[0] = [ - ast.While( - test=p[2], body=p[4], orelse=p5, lineno=self.lineno, col_offset=self.col - ) - ] - - def p_for_stmt(self, p): - """for_stmt : for_tok exprlist IN testlist COLON suite - | for_tok exprlist IN testlist COLON suite else_part - """ - p1, p2 = p[1], p[2] - p7 = p[7] if len(p) > 7 else [] - if len(p2) == 1: - p2 = p2[0] - store_ctx(p2) - else: - for x in p2: - store_ctx(x) - p2 = ast.Tuple( - elts=p2, - ctx=ast.Store(), - lineno=p2[0].lineno, - col_offset=p2[0].col_offset, - ) - p[0] = [ - ast.For( - target=p2, - iter=p[4], - body=p[6], - orelse=p7, - lineno=p1.lineno, - col_offset=p1.lexpos, - ) - ] - - def p_except_part(self, p): - """except_part : except_clause COLON suite""" - p0 = p[1] - p0.body = p[3] - p[0] = [p0] - - def p_finally_part(self, p): - """finally_part : FINALLY COLON suite""" - p[0] = p[3] - - def p_try_stmt_t5(self, p): - """try_stmt : try_tok COLON suite finally_part""" - p1 = p[1] - p[0] = [ - ast.Try( - body=p[3], - handlers=[], - orelse=[], - finalbody=p[4], - lineno=p1.lineno, - col_offset=p1.lexpos, - ) - ] - - def p_try_stmt_t6(self, p): - """try_stmt : try_tok COLON suite except_part_list finally_part_opt""" - p1 = p[1] - p[0] = [ - ast.Try( - body=p[3], - handlers=p[4], - orelse=[], - finalbody=([] if p[5] is None else p[5]), - lineno=p1.lineno, - col_offset=p1.lexpos, - ) - ] - - def p_try_stmt_t7(self, p): - """try_stmt : try_tok COLON suite except_part_list else_part finally_part_opt""" - p1 = p[1] - p[0] = [ - ast.Try( - body=p[3], - handlers=p[4], - orelse=([] if p[5] is None else p[5]), - finalbody=([] if p[6] is None else p[6]), - lineno=p1.lineno, - col_offset=p1.lexpos, - ) - ] - - def p_with_stmt_w5(self, p): - """with_stmt : with_tok with_item COLON suite""" - p1 = p[1] - p[0] = [ - 
ast.With(items=[p[2]], body=p[4], lineno=p1.lineno, col_offset=p1.lexpos) - ] - - def p_with_stmt_p6(self, p): - """with_stmt : with_tok with_item comma_with_item_list COLON suite""" - p1 = p[1] - p[0] = [ - ast.With( - items=[p[2]] + p[3], body=p[5], lineno=p1.lineno, col_offset=p1.lexpos - ) - ] - - def p_with_bang_stmt_single_suite(self, p): - """with_stmt : with_tok BANG with_item rawsuite""" - p1, p3, p4 = p[1], p[3], p[4] - expr = p3.context_expr - l, c = expr.lineno, expr.col_offset - gblcall = xonsh_call("globals", [], lineno=l, col=c) - loccall = xonsh_call("locals", [], lineno=l, col=c) - margs = [expr, p4, gblcall, loccall] - p3.context_expr = xonsh_call("__xonsh__.enter_macro", margs, lineno=l, col=c) - body = [ast.Pass(lineno=p4.lineno, col_offset=p4.col_offset)] - p[0] = [ast.With(items=[p3], body=body, lineno=p1.lineno, col_offset=p1.lexpos)] - - def p_with_bang_stmt_many_suite(self, p): - """with_stmt : with_tok BANG with_item comma_with_item_list rawsuite""" - p1, p3, p4, p5 = p[1], p[3], p[4], p[5] - items = [p3] + p4 - for item in items: - expr = item.context_expr - l, c = expr.lineno, expr.col_offset - gblcall = xonsh_call("globals", [], lineno=l, col=c) - loccall = xonsh_call("locals", [], lineno=l, col=c) - margs = [expr, p5, gblcall, loccall] - item.context_expr = xonsh_call( - "__xonsh__.enter_macro", margs, lineno=l, col=c - ) - body = [ast.Pass(lineno=p5.lineno, col_offset=p5.col_offset)] - p[0] = [ - ast.With(items=items, body=body, lineno=p1.lineno, col_offset=p1.lexpos) - ] - - def p_as_expr(self, p): - """as_expr : AS expr""" - p2 = p[2] - store_ctx(p2) - p[0] = p2 - - def p_with_item(self, p): - """with_item : test - | test as_expr - """ - p2 = p[2] if len(p) > 2 else None - p[0] = ast.withitem(context_expr=p[1], optional_vars=p2) - - def p_comma_with_item(self, p): - """comma_with_item : COMMA with_item""" - p[0] = [p[2]] - - def p_except_clause_e2(self, p): - """except_clause : except_tok""" - p1 = p[1] - p[0] = ast.ExceptHandler( - type=None, name=None, lineno=p1.lineno, col_offset=p1.lexpos - ) - - def p_except_clause(self, p): - """except_clause : except_tok test as_name_opt""" - p1 = p[1] - p[0] = ast.ExceptHandler( - type=p[2], name=p[3], lineno=p1.lineno, col_offset=p1.lexpos - ) - - def p_suite(self, p): - """suite : simple_stmt - | NEWLINE INDENT stmt_list DEDENT - """ - p[0] = p[1] if len(p) == 2 else p[3] - - def p_rawsuite_indent(self, p): - """rawsuite : COLON NEWLINE indent_tok nodedent dedent_tok""" - p3, p5 = p[3], p[5] - beg = (p3.lineno, p3.lexpos) - end = (p5.lineno, p5.lexpos) - s = self.source_slice(beg, end) - s = textwrap.dedent(s) - p[0] = ast.Str(s=s, lineno=beg[0], col_offset=beg[1]) - - def p_rawsuite_simple_stmt(self, p): - """rawsuite : colon_tok nonewline newline_tok""" - p1, p3 = p[1], p[3] - beg = (p1.lineno, p1.lexpos + 1) - end = (p3.lineno, p3.lexpos) - s = self.source_slice(beg, end).strip() - p[0] = ast.Str(s=s, lineno=beg[0], col_offset=beg[1]) - - def _attach_nodedent_base_rules(self): - toks = set(self.tokens) - toks.remove("DEDENT") - ts = "\n | ".join(sorted(toks)) - doc = "nodedent : " + ts + "\n" - self.p_nodedent_base.__func__.__doc__ = doc - - def p_nodedent_base(self, p): - # see above attachment function - pass - - def p_nodedent_any(self, p): - """nodedent : INDENT any_dedent_toks DEDENT""" - pass - - def p_nodedent_many(self, p): - """nodedent : nodedent nodedent""" - pass - - def p_any_dedent_tok(self, p): - """any_dedent_tok : nodedent - | DEDENT - """ - pass - - def p_any_dedent_toks(self, p): - 
"""any_dedent_toks : any_dedent_tok - | any_dedent_toks any_dedent_tok - """ - pass - - def _attach_nonewline_base_rules(self): - toks = set(self.tokens) - toks -= { - "NEWLINE", - "LPAREN", - "RPAREN", - "LBRACE", - "RBRACE", - "LBRACKET", - "RBRACKET", - "AT_LPAREN", - "BANG_LPAREN", - "BANG_LBRACKET", - "DOLLAR_LPAREN", - "DOLLAR_LBRACE", - "DOLLAR_LBRACKET", - "ATDOLLAR_LPAREN", - } - ts = "\n | ".join(sorted(toks)) - doc = "nonewline : " + ts + "\n" - self.p_nonewline_base.__func__.__doc__ = doc - - def p_nonewline_base(self, p): - # see above attachment function - pass - - def p_nonewline_any(self, p): - """nonewline : any_nested_raw""" - pass - - def p_nonewline_many(self, p): - """nonewline : nonewline nonewline""" - pass - - def p_test_ol(self, p): - """test : or_test - | lambdef - """ - p[0] = p[1] - - def p_test_o5(self, p): - """test : or_test IF or_test ELSE test""" - p[0] = ast.IfExp( - test=p[3], body=p[1], orelse=p[5], lineno=self.lineno, col_offset=self.col - ) - - def p_test_nocond(self, p): - """test_nocond : or_test - | lambdef_nocond - """ - p[0] = p[1] - - def p_lambdef(self, p): - """lambdef : lambda_tok varargslist_opt COLON test""" - p1, p2, p4 = p[1], p[2], p[4] - if p2 is None: - args = ast.arguments( - args=[], - vararg=None, - kwonlyargs=[], - kw_defaults=[], - kwarg=None, - defaults=[], - ) - else: - args = p2 - p0 = ast.Lambda(args=args, body=p4, lineno=p1.lineno, col_offset=p1.lexpos) - p[0] = p0 - - def p_lambdef_nocond(self, p): - """lambdef_nocond : LAMBDA varargslist_opt COLON test_nocond""" - assert False - - def p_or_test(self, p): - """or_test : and_test or_and_test_list_opt""" - p1, p2 = p[1], p[2] - if p2 is None: - p0 = p1 - elif len(p2) == 2: - lineno, col = lopen_loc(p1) - p0 = ast.BoolOp(op=p2[0], values=[p1, p2[1]], lineno=lineno, col_offset=col) - else: - lineno, col = lopen_loc(p1) - p0 = ast.BoolOp( - op=p2[0], values=[p[1]] + p2[1::2], lineno=lineno, col_offset=col - ) - p[0] = p0 - - def p_or_and_test(self, p): - """or_and_test : OR and_test""" - p[0] = [ast.Or(), p[2]] - - def p_and_test(self, p): - """and_test : not_test and_not_test_list_opt""" - p1, p2 = p[1], p[2] - if p2 is None: - p0 = p1 - elif len(p2) == 2: - lineno, col = lopen_loc(p1) - p0 = ast.BoolOp(op=p2[0], values=[p1, p2[1]], lineno=lineno, col_offset=col) - else: - lineno, col = lopen_loc(p1) - p0 = ast.BoolOp( - op=p2[0], values=[p1] + p2[1::2], lineno=lineno, col_offset=col - ) - p[0] = p0 - - def p_and_not_test(self, p): - """and_not_test : AND not_test""" - p[0] = [ast.And(), p[2]] - - def p_not_test_not(self, p): - """not_test : NOT not_test""" - p[0] = ast.UnaryOp( - op=ast.Not(), operand=p[2], lineno=self.lineno, col_offset=self.col - ) - - def p_not_test(self, p): - """not_test : comparison""" - p[0] = p[1] - - def p_comparison(self, p): - """comparison : expr comp_op_expr_list_opt""" - p1, p2 = p[1], p[2] - if p2 is None: - p0 = p1 - else: - p0 = ast.Compare( - left=p1, - ops=p2[::2], - comparators=p2[1::2], - lineno=p1.lineno, - col_offset=p1.col_offset, - ) - p[0] = p0 - - def p_comp_op_expr(self, p): - """comp_op_expr : comp_op expr""" - p[0] = [p[1], p[2]] - - _comp_ops = { - "<": ast.Lt, - ">": ast.Gt, - "==": ast.Eq, - ">=": ast.GtE, - "<=": ast.LtE, - "!=": ast.NotEq, - "in": ast.In, - ("not", "in"): ast.NotIn, - "is": ast.Is, - ("is", "not"): ast.IsNot, - } - - def p_comp_op_monograph(self, p): - """comp_op : LT - | GT - | EQ - | GE - | LE - | NE - | IN - | IS - """ - p[0] = self._comp_ops[p[1]]() - - def p_comp_op_digraph(self, p): - """comp_op : NOT IN 
- | IS NOT - """ - p[0] = self._comp_ops[(p[1], p[2])]() - - def p_star_expr(self, p): - """star_expr : times_tok expr""" - p1 = p[1] - p[0] = ast.Starred( - value=p[2], ctx=ast.Load(), lineno=p1.lineno, col_offset=p1.lexpos - ) - - def _binop_combine(self, p1, p2): - """Combines binary operations""" - if p2 is None: - p0 = p1 - elif isinstance(p2, ast.BinOp): - p2.left = p1 - p0 = p2 - elif isinstance(p2, Sequence) and isinstance(p2[0], ast.BinOp): - p0 = p2[0] - p0.left = p1 - p0.lineno, p0.col_offset = lopen_loc(p1) - for bop in p2[1:]: - locer = p1 if p0.left is p1 else p0 - bop.left = p0 - p0.lineno, p0.col_offset = lopen_loc(locer) - p0 = bop - else: - p0 = p1 + p2 - return p0 - - def p_expr(self, p): - """expr : xor_expr - | xor_expr pipe_xor_expr_list - """ - p[0] = self._binop_combine(p[1], p[2] if len(p) > 2 else None) - - def p_pipe_xor_expr(self, p): - """pipe_xor_expr : pipe_tok xor_expr""" - p1 = p[1] - p[0] = [ - ast.BinOp( - left=None, - op=ast.BitOr(), - right=p[2], - lineno=p1.lineno, - col_offset=p1.lexpos, - ) - ] - - def p_xor_expr(self, p): - """xor_expr : and_expr xor_and_expr_list_opt""" - p[0] = self._binop_combine(p[1], p[2]) - - def p_xor_and_expr(self, p): - """xor_and_expr : xor_tok and_expr""" - p1 = p[1] - p[0] = [ - ast.BinOp( - left=None, - op=ast.BitXor(), - right=p[2], - lineno=p1.lineno, - col_offset=p1.lexpos, - ) - ] - - def p_and_expr(self, p): - """and_expr : shift_expr ampersand_shift_expr_list_opt""" - p[0] = self._binop_combine(p[1], p[2]) - - def p_ampersand_shift_expr(self, p): - """ampersand_shift_expr : ampersand_tok shift_expr""" - p1 = p[1] - p[0] = [ - ast.BinOp( - left=None, - op=ast.BitAnd(), - right=p[2], - lineno=p1.lineno, - col_offset=p1.lexpos, - ) - ] - - def p_shift_expr(self, p): - """shift_expr : arith_expr shift_arith_expr_list_opt""" - p[0] = self._binop_combine(p[1], p[2]) - - def p_shift_arith_expr(self, p): - """shift_arith_expr : lshift_tok arith_expr - | rshift_tok arith_expr - """ - p1 = p[1] - op = ast.LShift() if p1.value == "<<" else ast.RShift() - p[0] = [ - ast.BinOp( - left=None, op=op, right=p[2], lineno=p1.lineno, col_offset=p1.lexpos - ) - ] - - def p_arith_expr_single(self, p): - """arith_expr : term""" - p[0] = p[1] - - def p_arith_expr_many(self, p): - """arith_expr : term pm_term_list""" - p1, p2 = p[1], p[2] - if len(p2) == 2: - lineno, col = lopen_loc(p1) - p0 = ast.BinOp( - left=p1, op=p2[0], right=p2[1], lineno=lineno, col_offset=col - ) - else: - left = p1 - for op, right in zip(p2[::2], p2[1::2]): - locer = left if left is p1 else op - lineno, col = lopen_loc(locer) - left = ast.BinOp( - left=left, op=op, right=right, lineno=lineno, col_offset=col - ) - p0 = left - p[0] = p0 - - _term_binops = { - "+": ast.Add, - "-": ast.Sub, - "*": ast.Mult, - "@": ast.MatMult, - "/": ast.Div, - "%": ast.Mod, - "//": ast.FloorDiv, - } - - def p_pm_term(self, p): - """pm_term : plus_tok term - | minus_tok term - """ - p1 = p[1] - op = self._term_binops[p1.value](lineno=p1.lineno, col_offset=p1.lexpos) - p[0] = [op, p[2]] - - def p_term(self, p): - """term : factor op_factor_list_opt""" - p1, p2 = p[1], p[2] - if p2 is None: - p0 = p1 - elif len(p2) == 2: - lineno, col = lopen_loc(p1) - p0 = ast.BinOp( - left=p1, op=p2[0], right=p2[1], lineno=lineno, col_offset=col - ) - else: - left = p1 - for op, right in zip(p2[::2], p2[1::2]): - locer = left if left is p1 else op - lineno, col = lopen_loc(locer) - left = ast.BinOp( - left=left, op=op, right=right, lineno=lineno, col_offset=col - ) - p0 = left - p[0] = p0 - - def 
p_op_factor(self, p): - """op_factor : times_tok factor - | at_tok factor - | divide_tok factor - | mod_tok factor - | doublediv_tok factor - """ - p1 = p[1] - op = self._term_binops[p1.value] - if op is None: - self._parse_error( - "operation {0!r} not supported".format(p1), - self.currloc(lineno=p.lineno, column=p.lexpos), - ) - p[0] = [op(lineno=p1.lineno, col_offset=p1.lexpos), p[2]] - - _factor_ops = {"+": ast.UAdd, "-": ast.USub, "~": ast.Invert} - - def p_factor_power(self, p): - """factor : power""" - p[0] = p[1] - - def p_factor_unary(self, p): - """factor : PLUS factor - | MINUS factor - | TILDE factor - """ - op = self._factor_ops[p[1]]() - p[0] = ast.UnaryOp(op=op, operand=p[2], lineno=self.lineno, col_offset=self.col) - - def p_power_atom(self, p): - """power : atom_expr""" - p[0] = p[1] - - def p_power(self, p): - """power : atom_expr POW factor""" - p1 = p[1] - p[0] = ast.BinOp( - left=p1, - op=ast.Pow(), - right=p[3], - lineno=p1.lineno, - col_offset=p1.col_offset, - ) - - def p_yield_expr_or_testlist_comp(self, p): - """yield_expr_or_testlist_comp : yield_expr - | testlist_comp - """ - p[0] = p[1] - - def _list_or_elts_if_not_real_tuple(self, x): - if isinstance(x, ast.Tuple) and not ( - hasattr(x, "_real_tuple") and x._real_tuple - ): - rtn = x.elts - else: - rtn = [x] - return rtn - - def apply_trailers(self, leader, trailers): - """Helper function for atom expr.""" - if trailers is None: - return leader - p0 = leader - for trailer in trailers: - if isinstance(trailer, (ast.Index, ast.Slice, ast.ExtSlice)): - p0 = ast.Subscript( - value=leader, - slice=trailer, - ctx=ast.Load(), - lineno=leader.lineno, - col_offset=leader.col_offset, - ) - elif isinstance(trailer, Mapping): - # call normal functions - p0 = ast.Call( - func=leader, - lineno=leader.lineno, - col_offset=leader.col_offset, - **trailer - ) - elif isinstance(trailer, (ast.Tuple, tuple)): - # call macro functions - l, c = leader.lineno, leader.col_offset - gblcall = xonsh_call("globals", [], lineno=l, col=c) - loccall = xonsh_call("locals", [], lineno=l, col=c) - if isinstance(trailer, tuple): - trailer, arglist = trailer - margs = [leader, trailer, gblcall, loccall] - p0 = xonsh_call("__xonsh__.call_macro", margs, lineno=l, col=c) - elif isinstance(trailer, str): - if trailer == "?": - p0 = xonsh_help(leader, lineno=leader.lineno, col=leader.col_offset) - elif trailer == "??": - p0 = xonsh_superhelp( - leader, lineno=leader.lineno, col=leader.col_offset - ) - else: - p0 = ast.Attribute( - value=leader, - attr=trailer, - ctx=ast.Load(), - lineno=leader.lineno, - col_offset=leader.col_offset, - ) - else: - assert False - leader = p0 - return p0 - - def p_atom_expr(self, p): - """atom_expr : atom trailer_list_opt""" - p[0] = self.apply_trailers(p[1], p[2]) - - # - # Atom rules! (So does Adam!) 
- # - def p_atom_lparen(self, p): - """atom : lparen_tok yield_expr_or_testlist_comp_opt RPAREN""" - p1, p2 = p[1], p[2] - p1, p1_tok = p1.value, p1 - if p2 is None: - # empty container atom - p0 = ast.Tuple( - elts=[], ctx=ast.Load(), lineno=self.lineno, col_offset=self.col - ) - p0._real_tuple = True - elif isinstance(p2, ast.AST): - p0 = p2 - p0._lopen_lineno, p0._lopen_col = p1_tok.lineno, p1_tok.lexpos - p0._real_tuple = True - elif len(p2) == 1 and isinstance(p2[0], ast.AST): - p0 = p2[0] - p0._lopen_lineno, p0._lopen_col = p1_tok.lineno, p1_tok.lexpos - else: - self.p_error(p) - p[0] = p0 - - def p_atom_lbraket(self, p): - """atom : lbracket_tok testlist_comp_opt RBRACKET""" - p1, p2 = p[1], p[2] - p1, p1_tok = p1.value, p1 - if p2 is None: - p0 = ast.List( - elts=[], ctx=ast.Load(), lineno=self.lineno, col_offset=self.col - ) - - elif isinstance(p2, ast.GeneratorExp): - p0 = ast.ListComp( - elt=p2.elt, - generators=p2.generators, - lineno=p2.lineno, - col_offset=p2.col_offset, - ) - else: - if isinstance(p2, ast.Tuple): - if hasattr(p2, "_real_tuple") and p2._real_tuple: - elts = [p2] - else: - elts = p2.elts - else: - elts = [p2] - p0 = ast.List( - elts=elts, - ctx=ast.Load(), - lineno=p1_tok.lineno, - col_offset=p1_tok.lexpos, - ) - p[0] = p0 - - def p_atom_lbrace(self, p): - """atom : lbrace_tok dictorsetmaker_opt RBRACE""" - p1, p2 = p[1], p[2] - p1, p1_tok = p1.value, p1 - if p2 is None: - p0 = ast.Dict( - keys=[], - values=[], - ctx=ast.Load(), - lineno=self.lineno, - col_offset=self.col, - ) - else: - p0 = p2 - p0.lineno, p0.col_offset = p1_tok.lineno, p1_tok.lexpos - p[0] = p0 - - def p_atom_ns(self, p): - """atom : number - | string_literal_list - """ - p[0] = p[1] - - def p_atom_name(self, p): - """atom : name_tok""" - p1 = p[1] - p[0] = ast.Name( - id=p1.value, ctx=ast.Load(), lineno=p1.lineno, col_offset=p1.lexpos - ) - - def p_atom_ellip(self, p): - """atom : ellipsis_tok""" - p1 = p[1] - p[0] = ast.EllipsisNode(lineno=p1.lineno, col_offset=p1.lexpos) - - def p_atom_none(self, p): - """atom : none_tok""" - p1 = p[1] - p[0] = ast.NameConstant(value=None, lineno=p1.lineno, col_offset=p1.lexpos) - - def p_atom_true(self, p): - """atom : true_tok""" - p1 = p[1] - p[0] = ast.NameConstant(value=True, lineno=p1.lineno, col_offset=p1.lexpos) - - def p_atom_false(self, p): - """atom : false_tok""" - p1 = p[1] - p[0] = ast.NameConstant(value=False, lineno=p1.lineno, col_offset=p1.lexpos) - - def p_atom_pathsearch(self, p): - """atom : SEARCHPATH""" - p[0] = xonsh_pathsearch(p[1], pymode=True, lineno=self.lineno, col=self.col) - - def p_atom_dname(self, p): - """atom : DOLLAR_NAME""" - p[0] = self._envvar_by_name(p[1][1:], lineno=self.lineno, col=self.col) - - def p_atom_fistful_of_dollars(self, p): - """atom : dollar_lbrace_tok test RBRACE - | bang_lparen_tok subproc RPAREN - | dollar_lparen_tok subproc RPAREN - | bang_lbracket_tok subproc RBRACKET - | dollar_lbracket_tok subproc RBRACKET - """ - p[0] = self._dollar_rules(p) - - def p_atom_bang_empty_fistful_of_dollars(self, p): - """atom : bang_lparen_tok subproc bang_tok RPAREN - | dollar_lparen_tok subproc bang_tok RPAREN - | bang_lbracket_tok subproc bang_tok RBRACKET - | dollar_lbracket_tok subproc bang_tok RBRACKET - """ - self._append_subproc_bang_empty(p) - p[0] = self._dollar_rules(p) - - def p_atom_bang_fistful_of_dollars(self, p): - """atom : bang_lparen_tok subproc bang_tok nocloser rparen_tok - | dollar_lparen_tok subproc bang_tok nocloser rparen_tok - | bang_lbracket_tok subproc bang_tok nocloser rbracket_tok - | 
dollar_lbracket_tok subproc bang_tok nocloser rbracket_tok - """ - self._append_subproc_bang(p) - p[0] = self._dollar_rules(p) - - def _attach_nocloser_base_rules(self): - toks = set(self.tokens) - toks -= { - "LPAREN", - "RPAREN", - "LBRACE", - "RBRACE", - "LBRACKET", - "RBRACKET", - "AT_LPAREN", - "BANG_LPAREN", - "BANG_LBRACKET", - "DOLLAR_LPAREN", - "DOLLAR_LBRACE", - "DOLLAR_LBRACKET", - "ATDOLLAR_LPAREN", - } - ts = "\n | ".join(sorted(toks)) - doc = "nocloser : " + ts + "\n" - self.p_nocloser_base.__func__.__doc__ = doc - - def p_nocloser_base(self, p): - # see above attachment function - pass - - def p_nocloser_any(self, p): - """nocloser : any_nested_raw""" - pass - - def p_nocloser_many(self, p): - """nocloser : nocloser nocloser""" - pass - - def p_string_literal(self, p): - """string_literal : string_tok""" - p1 = p[1] - prefix = RE_STRINGPREFIX.match(p1.value).group().lower() - if "p" in prefix and "f" in prefix: - new_pref = prefix.replace("p", "") - value_without_p = new_pref + p1.value[len(prefix):] - s = eval_fstr_fields(value_without_p, new_pref, filename=self.lexer.fname) - s = pyparse(s).body[0].value - s = ast.increment_lineno(s, p1.lineno - 1) - p[0] = xonsh_call( - "__xonsh__.path_literal", [s], lineno=p1.lineno, col=p1.lexpos - ) - - elif "p" in prefix: - value_without_p = prefix.replace("p", "") + p1.value[len(prefix) :] - s = ast.Str( - s=ast.literal_eval(value_without_p), - lineno=p1.lineno, - col_offset=p1.lexpos, - ) - p[0] = xonsh_call( - "__xonsh__.path_literal", [s], lineno=p1.lineno, col=p1.lexpos - ) - elif "f" in prefix: - s = eval_fstr_fields(p1.value, prefix, filename=self.lexer.fname) - s = pyparse(s).body[0].value - s = ast.increment_lineno(s, p1.lineno - 1) - p[0] = s - else: - s = ast.literal_eval(p1.value) - is_bytes = "b" in prefix - cls = ast.Bytes if is_bytes else ast.Str - p[0] = cls(s=s, lineno=p1.lineno, col_offset=p1.lexpos) - - def p_string_literal_list(self, p): - """string_literal_list : string_literal - | string_literal_list string_literal - """ - if len(p) == 3: - p[1].s += p[2].s - p[0] = p[1] - - def p_number(self, p): - """number : number_tok""" - p1 = p[1] - p[0] = ast.Num( - n=ast.literal_eval(p1.value.replace("_", "")), - lineno=p1.lineno, - col_offset=p1.lexpos, - ) - - def p_testlist_comp_comp(self, p): - """testlist_comp : test_or_star_expr comp_for""" - p1, p2 = p[1], p[2] - p[0] = ast.GeneratorExp( - elt=p1, generators=p2["comps"], lineno=p1.lineno, col_offset=p1.col_offset - ) - - def p_testlist_comp_comma(self, p): - """testlist_comp : test_or_star_expr comma_opt""" - p1, p2 = p[1], p[2] - if p2 is None: # split out grouping parentheses. 
- p[0] = p1 - else: - p[0] = ast.Tuple( - elts=[p1], ctx=ast.Load(), lineno=p1.lineno, col_offset=p1.col_offset - ) - - def p_testlist_comp_many(self, p): - """testlist_comp : test_or_star_expr comma_test_or_star_expr_list comma_opt""" - p1, p2 = p[1], p[2] - p[0] = ast.Tuple( - elts=[p1] + p2, ctx=ast.Load(), lineno=p1.lineno, col_offset=p1.col_offset - ) - - def p_trailer_lparen(self, p): - """trailer : LPAREN arglist_opt RPAREN""" - p[0] = [p[2] or dict(args=[], keywords=[], starargs=None, kwargs=None)] - - def p_trailer_bang_lparen(self, p): - """trailer : bang_lparen_tok macroarglist_opt rparen_tok - | bang_lparen_tok nocomma comma_tok rparen_tok - | bang_lparen_tok nocomma comma_tok WS rparen_tok - | bang_lparen_tok macroarglist comma_tok rparen_tok - | bang_lparen_tok macroarglist comma_tok WS rparen_tok - """ - p1, p2, p3 = p[1], p[2], p[3] - begins = [(p1.lineno, p1.lexpos + 2)] - ends = [(p3.lineno, p3.lexpos)] - if p2: - begins.extend([(x[0], x[1] + 1) for x in p2]) - ends = p2 + ends - elts = [] - for beg, end in zip(begins, ends): - s = self.source_slice(beg, end).strip() - if not s: - if len(begins) == 1: - break - else: - msg = "empty macro arguments not allowed" - self._parse_error(msg, self.currloc(*beg)) - node = ast.Str(s=s, lineno=beg[0], col_offset=beg[1]) - elts.append(node) - p0 = ast.Tuple( - elts=elts, ctx=ast.Load(), lineno=p1.lineno, col_offset=p1.lexpos - ) - p[0] = [p0] - - def p_trailer_p3(self, p): - """trailer : LBRACKET subscriptlist RBRACKET - | PERIOD NAME - """ - p[0] = [p[2]] - - def p_trailer_quest(self, p): - """trailer : DOUBLE_QUESTION - | QUESTION - """ - p[0] = [p[1]] - - def _attach_nocomma_tok_rules(self): - toks = set(self.tokens) - toks -= { - "COMMA", - "LPAREN", - "RPAREN", - "LBRACE", - "RBRACE", - "LBRACKET", - "RBRACKET", - "AT_LPAREN", - "BANG_LPAREN", - "BANG_LBRACKET", - "DOLLAR_LPAREN", - "DOLLAR_LBRACE", - "DOLLAR_LBRACKET", - "ATDOLLAR_LPAREN", - } - ts = "\n | ".join(sorted(toks)) - doc = "nocomma_tok : " + ts + "\n" - self.p_nocomma_tok.__func__.__doc__ = doc - - # The following grammar rules are no-ops because we don't need to glue the - # source code back together piece-by-piece. Instead, we simply look for - # top-level commas and record their positions. With these positions and the - # respective positions of the bounding parentheses, we can use the - # source_slice() method. This does a much better job of capturing exactly - # the source code that was provided. The tokenizer & lexer can be a little - # lossy, especially with respect to whitespace. 
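# A simplified sketch of the comma-position idea described in the comment
# above, using absolute string indexes in place of the (lineno, lexpos) pairs
# the real rules record; the source text and offsets here are illustrative:

source = "f!(a + 1, 'b, c', g(x))"
top_level_commas = [8, 16]  # COMMA tokens seen at bracket depth zero; the
                            # comma inside the string never becomes a COMMA
begins = [3] + [i + 1 for i in top_level_commas]  # 3 is just past the "!("
ends = top_level_commas + [len(source) - 1]       # stop before the final ")"
raw_args = [source[b:e].strip() for b, e in zip(begins, ends)]
print(raw_args)  # ['a + 1', "'b, c'", 'g(x)']

# Because each argument is cut straight from the original source, quoting and
# internal whitespace survive exactly; no token-by-token re-gluing is needed.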
- - def p_nocomma_tok(self, p): - # see attachment function above for docstring - pass - - def p_any_raw_tok(self, p): - """any_raw_tok : nocomma - | COMMA - """ - pass - - def p_any_raw_toks_one(self, p): - """any_raw_toks : any_raw_tok""" - pass - - def p_any_raw_toks_many(self, p): - """any_raw_toks : any_raw_toks any_raw_tok""" - pass - - def p_nocomma_part_tok(self, p): - """nocomma_part : nocomma_tok""" - pass - - def p_any_nested_raw(self, p): - """any_nested_raw : LPAREN any_raw_toks_opt RPAREN - | LBRACE any_raw_toks_opt RBRACE - | LBRACKET any_raw_toks_opt RBRACKET - | AT_LPAREN any_raw_toks_opt RPAREN - | BANG_LPAREN any_raw_toks_opt RPAREN - | BANG_LBRACKET any_raw_toks_opt RBRACKET - | DOLLAR_LPAREN any_raw_toks_opt RPAREN - | DOLLAR_LBRACE any_raw_toks_opt RBRACE - | DOLLAR_LBRACKET any_raw_toks_opt RBRACKET - | ATDOLLAR_LPAREN any_raw_toks_opt RPAREN - """ - pass - - def p_nocomma_part_any(self, p): - """nocomma_part : any_nested_raw""" - pass - - def p_nocomma_base(self, p): - """nocomma : nocomma_part""" - pass - - def p_nocomma_append(self, p): - """nocomma : nocomma nocomma_part""" - pass - - def p_comma_nocomma(self, p): - """comma_nocomma : comma_tok nocomma""" - p1 = p[1] - p[0] = [(p1.lineno, p1.lexpos)] - - def p_macroarglist_single(self, p): - """macroarglist : nocomma""" - p[0] = [] - - def p_macroarglist_many(self, p): - """macroarglist : nocomma comma_nocomma_list""" - p[0] = p[2] - - def p_subscriptlist(self, p): - """subscriptlist : subscript comma_subscript_list_opt comma_opt""" - p1, p2 = p[1], p[2] - if p2 is None: - pass - elif isinstance(p1, ast.Slice) or any([isinstance(x, ast.Slice) for x in p2]): - p1 = ast.ExtSlice(dims=[p1] + p2) - else: - p1.value = ast.Tuple( - elts=[p1.value] + [x.value for x in p2], - ctx=ast.Load(), - lineno=p1.lineno, - col_offset=p1.col_offset, - ) - p[0] = p1 - - def p_comma_subscript(self, p): - """comma_subscript : COMMA subscript""" - p[0] = [p[2]] - - def p_subscript_test(self, p): - """subscript : test""" - p1 = p[1] - p[0] = ast.Index(value=p1, lineno=p1.lineno, col_offset=p1.col_offset) - - def p_subscript_tok(self, p): - """subscript : test_opt colon_tok test_opt sliceop_opt""" - p1 = p[1] - if p1 is None: - p2 = p[2] - lineno, col = p2.lineno, p2.lexpos - else: - lineno, col = p1.lineno, p1.col_offset - p[0] = ast.Slice(lower=p1, upper=p[3], step=p[4], lineno=lineno, col_offset=col) - - def p_sliceop(self, p): - """sliceop : COLON test_opt""" - p[0] = p[2] - - def p_expr_or_star_expr(self, p): - """expr_or_star_expr : expr - | star_expr - """ - p[0] = p[1] - - def p_comma_expr_or_star_expr(self, p): - """comma_expr_or_star_expr : COMMA expr_or_star_expr""" - p[0] = [p[2]] - - def p_exprlist_e3(self, p): - """exprlist : expr_or_star_expr comma_opt""" - p[0] = [p[1]] - - def p_exprlist_many(self, p): - """exprlist : expr_or_star_expr comma_expr_or_star_expr_list comma_opt""" - p2 = p[2] - p2.insert(0, p[1]) - p[0] = p2 - - def p_testlist_test(self, p): - """testlist : test""" - p1 = p[1] - if isinstance(p1, ast.Tuple) and ( - hasattr(p1, "_real_tuple") and p1._real_tuple and p1.elts - ): - p1.lineno, p1.col_offset = lopen_loc(p1.elts[0]) - p[0] = p1 - - def p_testlist_single(self, p): - """testlist : test COMMA""" - p1 = p[1] - if isinstance(p1, ast.List) or ( - isinstance(p1, ast.Tuple) and hasattr(p1, "_real_tuple") and p1._real_tuple - ): - lineno, col = lopen_loc(p1) - p[0] = ast.Tuple( - elts=[p1], ctx=ast.Load(), lineno=p1.lineno, col_offset=p1.col_offset - ) - else: - p[0] = ensure_has_elts(p[1]) - - def 
p_testlist_many(self, p): - """testlist : test comma_test_list COMMA - | test comma_test_list - """ - p1 = p[1] - if isinstance(p1, ast.List) or ( - isinstance(p1, ast.Tuple) and hasattr(p1, "_real_tuple") and p1._real_tuple - ): - lineno, col = lopen_loc(p1) - p1 = ast.Tuple( - elts=[p1], ctx=ast.Load(), lineno=p1.lineno, col_offset=p1.col_offset - ) - else: - p1 = ensure_has_elts(p1) - p1.elts += p[2] - p[0] = p1 - - def p_comma_item(self, p): - """comma_item : COMMA item""" - p[0] = p[2] - - # - # Dict or set maker - # - def p_dictorsetmaker_t6(self, p): - """dictorsetmaker : test COLON test comma_item_list comma_opt""" - p1, p4 = p[1], p[4] - keys = [p1] - vals = [p[3]] - for k, v in zip(p4[::2], p4[1::2]): - keys.append(k) - vals.append(v) - lineno, col = lopen_loc(p1) - p[0] = ast.Dict( - keys=keys, values=vals, ctx=ast.Load(), lineno=lineno, col_offset=col - ) - - def p_dictorsetmaker_i4(self, p): - """dictorsetmaker : item comma_item_list comma_opt""" - p1, p2 = p[1], p[2] - keys = [p1[0]] - vals = [p1[1]] - for k, v in zip(p2[::2], p2[1::2]): - keys.append(k) - vals.append(v) - lineno, col = lopen_loc(p1[0] or p2[0]) - p[0] = ast.Dict( - keys=keys, values=vals, ctx=ast.Load(), lineno=lineno, col_offset=col - ) - - def p_dictorsetmaker_t4_dict(self, p): - """dictorsetmaker : test COLON testlist""" - keys = [p[1]] - vals = self._list_or_elts_if_not_real_tuple(p[3]) - lineno, col = lopen_loc(p[1]) - p[0] = ast.Dict( - keys=keys, values=vals, ctx=ast.Load(), lineno=lineno, col_offset=col - ) - - def p_dictorsetmaker_t4_set(self, p): - """dictorsetmaker : test_or_star_expr comma_test_or_star_expr_list comma_opt""" - p[0] = ast.Set( - elts=[p[1]] + p[2], ctx=ast.Load(), lineno=self.lineno, col_offset=self.col - ) - - def p_dictorsetmaker_test_comma(self, p): - """dictorsetmaker : test_or_star_expr comma_opt""" - elts = self._list_or_elts_if_not_real_tuple(p[1]) - p[0] = ast.Set( - elts=elts, ctx=ast.Load(), lineno=self.lineno, col_offset=self.col - ) - - def p_dictorsetmaker_testlist(self, p): - """dictorsetmaker : testlist""" - elts = self._list_or_elts_if_not_real_tuple(p[1]) - p[0] = ast.Set( - elts=elts, ctx=ast.Load(), lineno=self.lineno, col_offset=self.col - ) - - def p_dictorsetmaker_comp(self, p): - """dictorsetmaker : item comp_for - | test_or_star_expr comp_for - """ - p1 = p[1] - comps = p[2].get("comps", []) - if isinstance(p1, list) and len(p1) == 2: - p[0] = ast.DictComp( - key=p1[0], - value=p1[1], - generators=comps, - lineno=self.lineno, - col_offset=self.col, - ) - else: - p[0] = ast.SetComp( - elt=p1, generators=comps, lineno=self.lineno, col_offset=self.col - ) - - def p_classdef(self, p): - """classdef : class_tok NAME func_call_opt COLON suite""" - p1, p3 = p[1], p[3] - b, kw = ([], []) if p3 is None else (p3["args"], p3["keywords"]) - c = ast.ClassDef( - name=p[2], - bases=b, - keywords=kw, - starargs=None, - kwargs=None, - body=p[5], - decorator_list=[], - lineno=p1.lineno, - col_offset=p1.lexpos, - ) - p[0] = [c] - - def p_comma_argument(self, p): - """comma_argument : COMMA argument""" - p[0] = [p[2]] - - def p_comp_iter(self, p): - """comp_iter : comp_for - | comp_if - """ - p[0] = p[1] - - def p_comp_for(self, p): - """comp_for : FOR exprlist IN or_test comp_iter_opt""" - targs, it, p5 = p[2], p[4], p[5] - if len(targs) == 1: - targ = targs[0] - else: - targ = ensure_has_elts(targs) - store_ctx(targ) - comp = ast.comprehension(target=targ, iter=it, ifs=[]) - comps = [comp] - p0 = {"comps": comps} - if p5 is not None: - comps += p5.get("comps", []) - comp.ifs 
+= p5.get("if", []) - p[0] = p0 - - def p_comp_if(self, p): - """comp_if : IF test_nocond comp_iter_opt""" - p2, p3 = p[2], p[3] - p0 = {"if": [p2]} - if p3 is not None: - p0["comps"] = p3.get("comps", []) - p[0] = p0 - - def p_yield_expr(self, p): - """yield_expr : yield_tok yield_arg_opt""" - p1, p2 = p[1], p[2] - if p2 is None: - p0 = ast.Yield(value=p2, lineno=p1.lineno, col_offset=p1.lexpos) - elif p2["from"]: - p0 = ast.YieldFrom(value=p2["val"], lineno=p1.lineno, col_offset=p1.lexpos) - else: - p0 = ast.Yield(value=p2["val"], lineno=p1.lineno, col_offset=p1.lexpos) - p[0] = p0 - - def p_yield_arg_from(self, p): - """yield_arg : FROM test""" - p[0] = {"from": True, "val": p[2]} - - def p_yield_arg_testlist(self, p): - """yield_arg : testlist""" - p[0] = {"from": False, "val": p[1]} - - # - # subprocess - # - - def _dollar_rules(self, p): - """These handle the special xonsh $ shell atoms by looking up - in a special __xonsh__.env dictionary injected in the __builtin__. - """ - lenp = len(p) - p1, p2 = p[1], p[2] - if isinstance(p1, LexToken): - p1, p1_tok = p1.value, p1 - lineno, col = p1_tok.lineno, p1_tok.lexpos - else: - lineno, col = self.lineno, self.col - if lenp == 3: # $NAME - p0 = self._envvar_by_name(p2, lineno=lineno, col=col) - elif p1 == "${": - xenv = load_attribute_chain("__xonsh__.env", lineno=lineno, col=col) - idx = ast.Index(value=p2) - p0 = ast.Subscript( - value=xenv, slice=idx, ctx=ast.Load(), lineno=lineno, col_offset=col - ) - elif p1 == "$(": - p0 = xonsh_call( - "__xonsh__.subproc_captured_stdout", p2, lineno=lineno, col=col - ) - elif p1 == "!(": - p0 = xonsh_call( - "__xonsh__.subproc_captured_object", p2, lineno=lineno, col=col - ) - elif p1 == "![": - p0 = xonsh_call( - "__xonsh__.subproc_captured_hiddenobject", p2, lineno=lineno, col=col - ) - elif p1 == "$[": - p0 = xonsh_call("__xonsh__.subproc_uncaptured", p2, lineno=lineno, col=col) - else: - assert False - return p0 - - def _envvar_getter_by_name(self, var, lineno=None, col=None): - xenv = load_attribute_chain("__xonsh__.env", lineno=lineno, col=col) - func = ast.Attribute( - value=xenv, attr="get", ctx=ast.Load(), lineno=lineno, col_offset=col - ) - return ast.Call( - func=func, - args=[ - ast.Str(s=var, lineno=lineno, col_offset=col), - ast.Str(s="", lineno=lineno, col_offset=col), - ], - keywords=[], - starargs=None, - kwargs=None, - lineno=lineno, - col_offset=col, - ) - - def _envvar_by_name(self, var, lineno=None, col=None): - """Looks up a xonsh variable by name.""" - xenv = load_attribute_chain("__xonsh__.env", lineno=lineno, col=col) - idx = ast.Index(value=ast.Str(s=var, lineno=lineno, col_offset=col)) - return ast.Subscript( - value=xenv, slice=idx, ctx=ast.Load(), lineno=lineno, col_offset=col - ) - - def _subproc_cliargs(self, args, lineno=None, col=None): - """Creates an expression for subprocess CLI arguments.""" - cliargs = currlist = empty_list(lineno=lineno, col=col) - for arg in args: - action = arg._cliarg_action - if action == "append": - if currlist is None: - currlist = empty_list(lineno=lineno, col=col) - cliargs = binop( - cliargs, ast.Add(), currlist, lineno=lineno, col=col - ) - currlist.elts.append(arg) - elif action == "extend": - cliargs = binop(cliargs, ast.Add(), arg, lineno=lineno, col=col) - currlist = None - elif action == "splitlines": - sl = call_split_lines(arg, lineno=lineno, col=col) - cliargs = binop(cliargs, ast.Add(), sl, lineno=lineno, col=col) - currlist = None - elif action == "ensure_list": - x = ensure_list_from_str_or_list(arg, lineno=lineno, col=col) 
- cliargs = binop(cliargs, ast.Add(), x, lineno=lineno, col=col) - currlist = None - else: - raise ValueError("action not understood: " + action) - del arg._cliarg_action - return cliargs - - def p_pipe(self, p): - """pipe : PIPE - | WS PIPE - | PIPE WS - | WS PIPE WS - """ - p[0] = ast.Str(s="|", lineno=self.lineno, col_offset=self.col) - - def p_subproc_s2(self, p): - """subproc : subproc_atoms - | subproc_atoms WS - """ - p1 = p[1] - p[0] = [self._subproc_cliargs(p1, lineno=self.lineno, col=self.col)] - - def p_subproc_amp(self, p): - """subproc : subproc AMPERSAND""" - p1 = p[1] - p[0] = p1 + [ast.Str(s=p[2], lineno=self.lineno, col_offset=self.col)] - - def p_subproc_pipe(self, p): - """subproc : subproc pipe subproc_atoms - | subproc pipe subproc_atoms WS - """ - p1 = p[1] - if len(p1) > 1 and hasattr(p1[-2], "s") and p1[-2].s != "|": - msg = "additional redirect following non-pipe redirect" - self._parse_error(msg, self.currloc(lineno=self.lineno, column=self.col)) - cliargs = self._subproc_cliargs(p[3], lineno=self.lineno, col=self.col) - p[0] = p1 + [p[2], cliargs] - - def p_subproc_atoms_single(self, p): - """subproc_atoms : subproc_atom""" - p[0] = [p[1]] - - def p_subproc_atoms_many(self, p): - """subproc_atoms : subproc_atoms WS subproc_atom""" - p1 = p[1] - p1.append(p[3]) - p[0] = p1 - - def p_subproc_atoms_subshell(self, p): - """subproc_atoms : lparen_tok any_raw_tok rparen_tok - | lparen_tok any_raw_toks rparen_tok - """ - p1 = p[1] - p3 = p[3] - l = p1.lineno - c = p1.lexpos + 1 - subcmd = self.source_slice((l, c), (p3.lineno, p3.lexpos)) - subcmd = subcmd.strip() + "\n" - p0 = [ - ast.Str(s="xonsh", lineno=l, col_offset=c), - ast.Str(s="-c", lineno=l, col_offset=c), - ast.Str(s=subcmd, lineno=l, col_offset=c), - ] - for arg in p0: - arg._cliarg_action = "append" - p[0] = p0 - - # - # Subproc atom rules - # - def _append_subproc_bang_empty(self, p): - """Appends an empty string in subprocess mode to the argument list.""" - p3 = p[3] - node = ast.Str(s="", lineno=p3.lineno, col_offset=p3.lexpos + 1) - p[2][-1].elts.append(node) - - def _append_subproc_bang(self, p): - """Appends the part between ! and the ) or ] in subprocess mode to the - argument list. 
- """ - p3, p5 = p[3], p[5] - beg = (p3.lineno, p3.lexpos + 1) - end = (p5.lineno, p5.lexpos) - s = self.source_slice(beg, end).strip() - node = ast.Str(s=s, lineno=beg[0], col_offset=beg[1]) - p[2][-1].elts.append(node) - - def p_subproc_atom_uncaptured(self, p): - """subproc_atom : dollar_lbracket_tok subproc RBRACKET""" - p1 = p[1] - p0 = xonsh_call( - "__xonsh__.subproc_uncaptured", args=p[2], lineno=p1.lineno, col=p1.lexpos - ) - p0._cliarg_action = "splitlines" - p[0] = p0 - - def p_subproc_atom_uncaptured_bang_empty(self, p): - """subproc_atom : dollar_lbracket_tok subproc bang_tok RBRACKET""" - self._append_subproc_bang_empty(p) - self.p_subproc_atom_uncaptured(p) - - def p_subproc_atom_uncaptured_bang(self, p): - """subproc_atom : dollar_lbracket_tok subproc bang_tok nocloser rbracket_tok""" - self._append_subproc_bang(p) - self.p_subproc_atom_uncaptured(p) - - def p_subproc_atom_captured_stdout(self, p): - """subproc_atom : dollar_lparen_tok subproc RPAREN""" - p1 = p[1] - p0 = xonsh_call( - "__xonsh__.subproc_captured_stdout", - args=p[2], - lineno=p1.lineno, - col=p1.lexpos, - ) - p0._cliarg_action = "append" - p[0] = p0 - - def p_subproc_atom_captured_stdout_bang_empty(self, p): - """subproc_atom : dollar_lparen_tok subproc bang_tok RPAREN""" - self._append_subproc_bang_empty(p) - self.p_subproc_atom_captured_stdout(p) - - def p_subproc_atom_captured_stdout_bang(self, p): - """subproc_atom : dollar_lparen_tok subproc bang_tok nocloser rparen_tok""" - self._append_subproc_bang(p) - self.p_subproc_atom_captured_stdout(p) - - def p_subproc_atom_pyenv_lookup(self, p): - """subproc_atom : dollar_lbrace_tok test RBRACE""" - p1 = p[1] - lineno, col = p1.lineno, p1.lexpos - xenv = load_attribute_chain("__xonsh__.env", lineno=lineno, col=col) - func = ast.Attribute( - value=xenv, attr="get", ctx=ast.Load(), lineno=lineno, col_offset=col - ) - p0 = ast.Call( - func=func, - args=[p[2], ast.Str(s="", lineno=lineno, col_offset=col)], - keywords=[], - starargs=None, - kwargs=None, - lineno=lineno, - col_offset=col, - ) - p0._cliarg_action = "append" - p[0] = p0 - - def p_subproc_atom_pyeval(self, p): - """subproc_atom : at_lparen_tok testlist_comp RPAREN - subproc_arg_part : at_lparen_tok testlist_comp RPAREN - """ - p1 = p[1] - p0 = xonsh_call( - "__xonsh__.list_of_strs_or_callables", - [p[2]], - lineno=p1.lineno, - col=p1.lexpos, - ) - p0._cliarg_action = "extend" - p[0] = p0 - - def p_subproc_atom_subproc_inject(self, p): - """subproc_atom : atdollar_lparen_tok subproc RPAREN""" - p1 = p[1] - p0 = xonsh_call( - "__xonsh__.subproc_captured_inject", p[2], lineno=p1.lineno, col=p1.lexpos - ) - p0._cliarg_action = "extend" - p[0] = p0 - - def p_subproc_atom_subproc_inject_bang_empty(self, p): - """subproc_atom : atdollar_lparen_tok subproc bang_tok RPAREN""" - self._append_subproc_bang_empty(p) - self.p_subproc_atom_subproc_inject(p) - - def p_subproc_atom_subproc_inject_bang(self, p): - """subproc_atom : atdollar_lparen_tok subproc bang_tok nocloser rparen_tok""" - self._append_subproc_bang(p) - self.p_subproc_atom_subproc_inject(p) - - def p_subproc_atom_redirect(self, p): - """subproc_atom : GT - | LT - | RSHIFT - | IOREDIRECT - """ - p0 = ast.Str(s=p[1], lineno=self.lineno, col_offset=self.col) - p0._cliarg_action = "append" - p[0] = p0 - - def p_subproc_atom_re(self, p): - """subproc_atom : SEARCHPATH""" - p0 = xonsh_pathsearch(p[1], pymode=False, lineno=self.lineno, col=self.col) - p0._cliarg_action = "extend" - p[0] = p0 - - def p_subproc_atom_str(self, p): - """subproc_atom : 
string_literal""" - p0 = xonsh_call( - "__xonsh__.expand_path", args=[p[1]], lineno=self.lineno, col=self.col - ) - p0._cliarg_action = "append" - p[0] = p0 - - def p_subproc_atom_arg(self, p): - """subproc_atom : subproc_arg""" - p1 = p[1] - if isinstance(p1, list): - # has an expanding function call, such as @(x) - p0 = xonsh_call( - "__xonsh__.list_of_list_of_strs_outer_product", - args=[ensure_has_elts(p1)], - lineno=p1[0].lineno, - col=p1[0].col_offset, - ) - p0._cliarg_action = "extend" - elif hasglobstar(p1): - # globbed literal argument - p0 = xonsh_call( - "__xonsh__.glob", args=[p1], lineno=p1.lineno, col=p1.col_offset - ) - p0._cliarg_action = "extend" - else: - # literal str argument - p0 = xonsh_call( - "__xonsh__.expand_path", args=[p1], lineno=p1.lineno, col=p1.col_offset - ) - p0._cliarg_action = "append" - p[0] = p0 - - def p_subproc_arg_single(self, p): - """subproc_arg : subproc_arg_part""" - p[0] = p[1] - - def p_subproc_arg_many(self, p): - """subproc_arg : subproc_arg subproc_arg_part""" - # This glues the string together after parsing - p1 = p[1] - p2 = p[2] - if isinstance(p1, ast.Str) and isinstance(p2, ast.Str): - p0 = ast.Str(p1.s + p2.s, lineno=p1.lineno, col_offset=p1.col_offset) - elif isinstance(p1, list): - if isinstance(p2, list): - p1.extend(p2) - else: - p1.append(p2) - p0 = p1 - elif isinstance(p2, list): - p2.insert(0, p1) - p0 = p2 - else: - p0 = [p1, p2] - p[0] = p0 - - def _attach_subproc_arg_part_rules(self): - toks = set(self.tokens) - toks -= { - "AND", - "OR", - "NOT", - "BANG", - "PIPE", - "WS", - "GT", - "LT", - "LSHIFT", - "RSHIFT", - "IOREDIRECT", - "SEARCHPATH", - "INDENT", - "DEDENT", - "LPAREN", - "RPAREN", - "LBRACE", - "RBRACE", - "LBRACKET", - "RBRACKET", - "AT_LPAREN", - "BANG_LPAREN", - "BANG_LBRACKET", - "DOLLAR_LPAREN", - "DOLLAR_LBRACE", - "DOLLAR_LBRACKET", - "ATDOLLAR_LPAREN", - } - ts = "\n | ".join(sorted([t.lower() + "_tok" for t in toks])) - doc = "subproc_arg_part : " + ts + "\n" - self.p_subproc_arg_part.__func__.__doc__ = doc - - def p_subproc_arg_part(self, p): - # Many tokens cannot be part of this rule, such as $, ', ", () - # Use a string atom instead. See above attachment functions - p1 = p[1] - p[0] = ast.Str(s=p1.value, lineno=p1.lineno, col_offset=p1.lexpos) - - # - # Helpers - # - - def p_test_comma_combine(self, p): - """test_comma_list : test comma_test_list - | test comma_test_list COMMA - """ - p2 = p[2] - p2.insert(0, p[1]) - p[0] = p2 - - def p_empty(self, p): - "empty : " - p[0] = None - - def p_error(self, p): - if p is None: - self._parse_error("no further code", None) - elif p.type == "ERRORTOKEN": - if isinstance(p.value, BaseException): - raise p.value - else: - self._parse_error( - p.value, self.currloc(lineno=p.lineno, column=p.lexpos) - ) - else: - msg = ("code: {0}".format(p.value),) - self._parse_error(msg, self.currloc(lineno=p.lineno, column=p.lexpos)) diff --git a/xonsh/parsers/context_check.py b/xonsh/parsers/context_check.py deleted file mode 100644 index a3f34f7..0000000 --- a/xonsh/parsers/context_check.py +++ /dev/null @@ -1,85 +0,0 @@ -import ast -import keyword -import collections - -_all_keywords = frozenset(keyword.kwlist) - - -def _not_assignable(x, augassign=False): - """ - If ``x`` represents a value that can be assigned to, return ``None``. - Otherwise, return a string describing the object. For use in generating - meaningful syntax errors. 
- """ - if augassign and isinstance(x, (ast.Tuple, ast.List)): - return "literal" - elif isinstance(x, (ast.Tuple, ast.List)): - if len(x.elts) == 0: - return "()" - for i in x.elts: - res = _not_assignable(i) - if res is not None: - return res - elif isinstance(x, (ast.Set, ast.Dict, ast.Num, ast.Str, ast.Bytes)): - return "literal" - elif isinstance(x, ast.Call): - return "function call" - elif isinstance(x, ast.Lambda): - return "lambda" - elif isinstance(x, (ast.BoolOp, ast.BinOp, ast.UnaryOp)): - return "operator" - elif isinstance(x, ast.IfExp): - return "conditional expression" - elif isinstance(x, ast.ListComp): - return "list comprehension" - elif isinstance(x, ast.DictComp): - return "dictionary comprehension" - elif isinstance(x, ast.SetComp): - return "set comprehension" - elif isinstance(x, ast.GeneratorExp): - return "generator expression" - elif isinstance(x, ast.Compare): - return "comparison" - elif isinstance(x, ast.Name) and x.id in _all_keywords: - return "keyword" - elif isinstance(x, ast.NameConstant): - return "keyword" - - -_loc = collections.namedtuple("_loc", ["lineno", "column"]) - - -def check_contexts(tree): - c = ContextCheckingVisitor() - c.visit(tree) - if c.error is not None: - e = SyntaxError(c.error[0]) - e.loc = _loc(c.error[1], c.error[2]) - raise e - - -class ContextCheckingVisitor(ast.NodeVisitor): - def __init__(self): - self.error = None - - def visit_Delete(self, node): - for i in node.targets: - err = _not_assignable(i) - if err is not None: - msg = "can't delete {}".format(err) - self.error = msg, i.lineno, i.col_offset - break - - def visit_Assign(self, node): - for i in node.targets: - err = _not_assignable(i) - if err is not None: - msg = "can't assign to {}".format(err) - self.error = msg, i.lineno, i.col_offset - break - - def visit_AugAssign(self, node): - err = _not_assignable(node.target, True) - if err is not None: - msg = "illegal target for augmented assignment: {}".format(err) - self.error = msg, node.target.lineno, node.target.col_offset diff --git a/xonsh/parsers/v34.py b/xonsh/parsers/v34.py deleted file mode 100644 index f8d7961..0000000 --- a/xonsh/parsers/v34.py +++ /dev/null @@ -1,157 +0,0 @@ -# -*- coding: utf-8 -*- -"""Implements the xonsh parser for Python v3.4.""" -import xonsh.ast as ast -from xonsh.parsers.base import BaseParser - - -class Parser(BaseParser): - """A Python v3.4 compliant parser for the xonsh language.""" - - def __init__( - self, - lexer_optimize=True, - lexer_table="xonsh.lexer_table", - yacc_optimize=True, - yacc_table="xonsh.parser_table", - yacc_debug=False, - outputdir=None, - ): - """Parameters - ---------- - lexer_optimize : bool, optional - Set to false when unstable and true when lexer is stable. - lexer_table : str, optional - Lexer module used when optimized. - yacc_optimize : bool, optional - Set to false when unstable and true when parser is stable. - yacc_table : str, optional - Parser module used when optimized. - yacc_debug : debug, optional - Dumps extra debug info. - outputdir : str or None, optional - The directory to place generated tables within. 
- """ - # Rule creation and modification *must* take place before super() - opt_rules = ["argument_comma_list", "comma_argument_list"] - for rule in opt_rules: - self._opt_rule(rule) - - list_rules = ["argument_comma"] - for rule in list_rules: - self._list_rule(rule) - - super().__init__( - lexer_optimize=lexer_optimize, - lexer_table=lexer_table, - yacc_optimize=yacc_optimize, - yacc_table=yacc_table, - yacc_debug=yacc_debug, - outputdir=outputdir, - ) - - def p_classdef_or_funcdef(self, p): - """classdef_or_funcdef : classdef - | funcdef - """ - p[0] = p[1] - - def p_item(self, p): - """item : test COLON test""" - lenp = len(p) - if lenp == 4: - p0 = [p[1], p[3]] - elif lenp == 3: - p0 = [None, p[2]] - else: - assert False - p[0] = p0 - - def _set_arg(self, args, arg, ensure_kw=False): - if isinstance(arg, ast.keyword): - args["keywords"].append(arg) - elif ensure_kw: - args["kwargs"] = arg - else: - args["args"].append(arg) - - def p_arglist(self, p): - """arglist : argument comma_opt - | argument_comma_list argument comma_opt - | argument_comma_list_opt TIMES test comma_argument_list_opt - | argument_comma_list_opt TIMES test COMMA POW test - | argument_comma_list_opt TIMES test comma_argument_list COMMA POW test - | argument_comma_list_opt POW test - """ - lenp = len(p) - p1, p2 = p[1], p[2] - p0 = {"args": [], "keywords": [], "starargs": None, "kwargs": None} - if lenp == 3: - self._set_arg(p0, p1) - elif lenp == 4 and p2 != "**": - for arg in p1: - self._set_arg(p0, arg) - self._set_arg(p0, p2) - elif lenp == 4 and p2 == "**": - if p1 is not None: - for arg in p1: - self._set_arg(p0, arg) - self._set_arg(p0, p[3], ensure_kw=True) - elif lenp == 5: - p0["starargs"], p4 = p[3], p[4] - if p1 is not None: - for arg in p1: - self._set_arg(p0, arg) - if p4 is not None: - for arg in p4: - self._set_arg(p0, arg, ensure_kw=True) - elif lenp == 7: - p0["starargs"] = p[3] - if p1 is not None: - for arg in p1: - self._set_arg(p0, arg) - self._set_arg(p0, p[6], ensure_kw=True) - elif lenp == 8: - p0["starargs"], p4 = p[3], p[4] - if p1 is not None: - for arg in p1: - self._set_arg(p0, arg) - for arg in p4: - self._set_arg(p0, arg, ensure_kw=True) - self._set_arg(p0, p[7], ensure_kw=True) - else: - assert False - p[0] = p0 - - def p_argument_comma(self, p): - """argument_comma : argument COMMA""" - p[0] = [p[1]] - - def p_argument(self, p): - """argument : test - | test comp_for - | test EQUALS test - """ - # Really [keyword '='] test - # The reason that keywords are test nodes instead of NAME is that using - # NAME results in an ambiguity. 
- p1 = p[1] - lenp = len(p) - if lenp == 2: - p0 = p1 - elif lenp == 3: - if p1 == "**": - p0 = ast.keyword(arg=None, value=p[2]) - elif p1 == "*": - p0 = ast.Starred(value=p[2]) - else: - p0 = ast.GeneratorExp( - elt=p1, - generators=p[2]["comps"], - lineno=p1.lineno, - col_offset=p1.col_offset, - ) - elif lenp == 4: - p0 = ast.keyword(arg=p1.id, value=p[3]) - else: - assert False - p[0] = p0 diff --git a/xonsh/parsers/v35.py b/xonsh/parsers/v35.py deleted file mode 100644 index 6faf0f2..0000000 --- a/xonsh/parsers/v35.py +++ /dev/null @@ -1,148 +0,0 @@ -# -*- coding: utf-8 -*- -"""Implements the xonsh parser for Python v3.5.""" -import xonsh.ast as ast -from xonsh.parsers.base import BaseParser - - -class Parser(BaseParser): - """A Python v3.5 compliant parser for the xonsh language.""" - - def __init__( - self, - lexer_optimize=True, - lexer_table="xonsh.lexer_table", - yacc_optimize=True, - yacc_table="xonsh.parser_table", - yacc_debug=False, - outputdir=None, - ): - """Parameters - ---------- - lexer_optimize : bool, optional - Set to false when unstable and true when lexer is stable. - lexer_table : str, optional - Lexer module used when optimized. - yacc_optimize : bool, optional - Set to false when unstable and true when parser is stable. - yacc_table : str, optional - Parser module used when optimized. - yacc_debug : debug, optional - Dumps extra debug info. - outputdir : str or None, optional - The directory to place generated tables within. - """ - # Rule creation and modification *must* take place before super() - tok_rules = ["await", "async"] - for rule in tok_rules: - self._tok_rule(rule) - super().__init__( - lexer_optimize=lexer_optimize, - lexer_table=lexer_table, - yacc_optimize=yacc_optimize, - yacc_table=yacc_table, - yacc_debug=yacc_debug, - outputdir=outputdir, - ) - - def p_classdef_or_funcdef(self, p): - """classdef_or_funcdef : classdef - | funcdef - | async_funcdef - """ - p[0] = p[1] - - def p_async_funcdef(self, p): - """async_funcdef : async_tok funcdef""" - p1, f = p[1], p[2][0] - p[0] = [ast.AsyncFunctionDef(**f.__dict__)] - p[0][0]._async_tok = p1 - - def p_async_compound_stmt(self, p): - """compound_stmt : async_stmt""" - p[0] = p[1] - - def p_async_for_stmt(self, p): - """async_for_stmt : ASYNC for_stmt""" - f = p[2][0] - p[0] = [ast.AsyncFor(**f.__dict__)] - - def p_async_with_stmt(self, p): - """async_with_stmt : ASYNC with_stmt""" - w = p[2][0] - p[0] = [ast.AsyncWith(**w.__dict__)] - - def p_atom_expr_await(self, p): - """atom_expr : await_tok atom trailer_list_opt""" - p0 = self.apply_trailers(p[2], p[3]) - p1 = p[1] - p0 = ast.Await(value=p0, ctx=ast.Load(), lineno=p1.lineno, col_offset=p1.lexpos) - p[0] = p0 - - def p_async_stmt(self, p): - """async_stmt : async_funcdef - | async_with_stmt - | async_for_stmt - """ - p[0] = p[1] - - def p_item_test(self, p): - """item : test COLON test""" - p[0] = [p[1], p[3]] - - def p_item_pow(self, p): - """item : POW expr""" - p[0] = [None, p[2]] - - def _set_arg(self, args, arg, ensure_kw=False): - if isinstance(arg, ast.keyword): - args["keywords"].append(arg) - elif ensure_kw: - args["keywords"].append(ast.keyword(arg=None, value=arg)) - else: - args["args"].append(arg) - - def p_arglist_single(self, p): - """arglist : argument comma_opt""" - p0 = {"args": [], "keywords": []} - self._set_arg(p0, p[1]) - p[0] = p0 - - def p_arglist_many(self, p): - """arglist : argument comma_argument_list comma_opt - """ - p0 = {"args": [], "keywords": []} - self._set_arg(p0, p[1]) - for arg in p[2]: - self._set_arg(p0, arg) 
- p[0] = p0 - - # Argument rules - # "test '=' test" is really "keyword '=' test", but we have no such token. - # These need to be in a single rule to avoid grammar that is ambiguous - # to our LL(1) parser. Even though 'test' includes '*expr' in star_expr, - # we explicitly match '*' here, too, to give it proper precedence. - # Illegal combinations and orderings are blocked in ast.c: - # multiple (test comp_for) arguments are blocked; keyword unpackings - # that precede iterable unpackings are blocked; etc. - def p_argument_test_or_star(self, p): - """argument : test_or_star_expr""" - p[0] = p[1] - - def p_argument_kwargs(self, p): - """argument : POW test""" - p[0] = ast.keyword(arg=None, value=p[2]) - - def p_argument_args(self, p): - """argument : TIMES test""" - p[0] = ast.Starred(value=p[2]) - - def p_argument(self, p): - """argument : test comp_for""" - p1 = p[1] - p[0] = ast.GeneratorExp( - elt=p1, generators=p[2]["comps"], lineno=p1.lineno, col_offset=p1.col_offset - ) - - def p_argument_eq(self, p): - """argument : test EQUALS test""" - p[0] = ast.keyword(arg=p[1].id, value=p[3]) diff --git a/xonsh/parsers/v36.py b/xonsh/parsers/v36.py deleted file mode 100644 index 6a6dc6f..0000000 --- a/xonsh/parsers/v36.py +++ /dev/null @@ -1,43 +0,0 @@ -# -*- coding: utf-8 -*- -"""Implements the xonsh parser for Python v3.6.""" -import xonsh.ast as ast -from xonsh.parsers.v35 import Parser as ThreeFiveParser -from xonsh.parsers.base import store_ctx, ensure_has_elts, lopen_loc - - -class Parser(ThreeFiveParser): - """A Python v3.6 compliant parser for the xonsh language.""" - - def p_comp_for(self, p): - """comp_for : FOR exprlist IN or_test comp_iter_opt""" - targs, it, p5 = p[2], p[4], p[5] - if len(targs) == 1: - targ = targs[0] - else: - targ = ensure_has_elts(targs) - store_ctx(targ) - # only difference with base should be the is_async=0 - comp = ast.comprehension(target=targ, iter=it, ifs=[], is_async=0) - comps = [comp] - p0 = {"comps": comps} - if p5 is not None: - comps += p5.get("comps", []) - comp.ifs += p5.get("if", []) - p[0] = p0 - - def p_expr_stmt_annassign(self, p): - """expr_stmt : testlist_star_expr COLON test EQUALS test""" - p1 = p[1][0] - lineno, col = lopen_loc(p1) - if len(p[1]) > 1 or not isinstance(p1, ast.Name): - loc = self.currloc(lineno, col) - self._parse_error("only single target can be annotated", loc) - store_ctx(p1) - p[0] = ast.AnnAssign( - target=p1, - annotation=p[3], - value=p[5], - simple=1, - lineno=lineno, - col_offset=col, - ) diff --git a/xonsh/platform.py b/xonsh/platform.py deleted file mode 100644 index daf196c..0000000 --- a/xonsh/platform.py +++ /dev/null @@ -1,631 +0,0 @@ -"""Module for platform-specific constants and implementations, as well as -compatibility layers to make use of the 'best' implementation available -on a platform. -""" -import os -import sys -import ctypes -import signal -import pathlib -import builtins -import platform -import functools -import subprocess -import collections -import collections.abc as cabc -import importlib.util - -from xonsh.lazyasd import LazyBool, lazyobject, lazybool - -# do not import any xonsh-modules here to avoid circular dependencies - -FD_STDIN = 0 -FD_STDOUT = 1 -FD_STDERR = 2 - - -@lazyobject -def distro(): - try: - import distro as d - except ImportError: - d = None - except Exception: - raise - return d - - -# -# OS -# -ON_DARWIN = LazyBool(lambda: platform.system() == "Darwin", globals(), "ON_DARWIN") -"""``True`` if executed on a Darwin platform, else ``False``. 
""" -ON_LINUX = LazyBool(lambda: platform.system() == "Linux", globals(), "ON_LINUX") -"""``True`` if executed on a Linux platform, else ``False``. """ -ON_WINDOWS = LazyBool(lambda: platform.system() == "Windows", globals(), "ON_WINDOWS") -"""``True`` if executed on a native Windows platform, else ``False``. """ -ON_CYGWIN = LazyBool(lambda: sys.platform == "cygwin", globals(), "ON_CYGWIN") -"""``True`` if executed on a Cygwin Windows platform, else ``False``. """ -ON_MSYS = LazyBool(lambda: sys.platform == "msys", globals(), "ON_MSYS") -"""``True`` if executed on a MSYS Windows platform, else ``False``. """ -ON_POSIX = LazyBool(lambda: (os.name == "posix"), globals(), "ON_POSIX") -"""``True`` if executed on a POSIX-compliant platform, else ``False``. """ -ON_FREEBSD = LazyBool( - lambda: (sys.platform.startswith("freebsd")), globals(), "ON_FREEBSD" -) -"""``True`` if on a FreeBSD operating system, else ``False``.""" -ON_DRAGONFLY = LazyBool( - lambda: (sys.platform.startswith("dragonfly")), globals(), "ON_DRAGONFLY" -) -"""``True`` if on a DragonFly BSD operating system, else ``False``.""" -ON_NETBSD = LazyBool( - lambda: (sys.platform.startswith("netbsd")), globals(), "ON_NETBSD" -) -"""``True`` if on a NetBSD operating system, else ``False``.""" - - -@lazybool -def ON_BSD(): - """``True`` if on a BSD operating system, else ``False``.""" - return bool(ON_FREEBSD) or bool(ON_NETBSD) or bool(ON_DRAGONFLY) - - -@lazybool -def ON_BEOS(): - """True if we are on BeOS or Haiku.""" - return sys.platform == "beos5" or sys.platform == "haiku1" - - -@lazybool -def ON_WSL(): - """True if we are on Windows Subsystem for Linux (WSL)""" - return "Microsoft" in platform.release() - - -# -# Python & packages -# - -PYTHON_VERSION_INFO = sys.version_info[:3] -""" Version of Python interpreter as three-value tuple. """ - - -@lazyobject -def PYTHON_VERSION_INFO_BYTES(): - """The python version info tuple in a canonical bytes form.""" - return ".".join(map(str, sys.version_info)).encode() - - -ON_ANACONDA = LazyBool( - lambda: pathlib.Path(sys.prefix).joinpath("conda-meta").exists(), - globals(), - "ON_ANACONDA", -) -""" ``True`` if executed in an Anaconda instance, else ``False``. """ -CAN_RESIZE_WINDOW = LazyBool( - lambda: hasattr(signal, "SIGWINCH"), globals(), "CAN_RESIZE_WINDOW" -) -"""``True`` if we can resize terminal window, as provided by the presense of -signal.SIGWINCH, else ``False``. -""" - - -@lazybool -def HAS_PYGMENTS(): - """``True`` if `pygments` is available, else ``False``.""" - spec = importlib.util.find_spec("pygments") - return spec is not None - - -@functools.lru_cache(1) -def pygments_version(): - """pygments.__version__ version if available, else None.""" - if HAS_PYGMENTS: - import pygments - - v = pygments.__version__ - else: - v = None - return v - - -@functools.lru_cache(1) -def pygments_version_info(): - """ Returns `pygments`'s version as tuple of integers. 
""" - if HAS_PYGMENTS: - return tuple(int(x) for x in pygments_version().strip("<>+-=.").split(".")) - else: - return None - - -@functools.lru_cache(1) -def has_prompt_toolkit(): - """Tests if the `prompt_toolkit` is available.""" - spec = importlib.util.find_spec("prompt_toolkit") - return spec is not None - - -@functools.lru_cache(1) -def ptk_version(): - """Returns `prompt_toolkit.__version__` if available, else ``None``.""" - if has_prompt_toolkit(): - import prompt_toolkit - - return getattr(prompt_toolkit, "__version__", "<0.57") - else: - return None - - -@functools.lru_cache(1) -def ptk_version_info(): - """ Returns `prompt_toolkit`'s version as tuple of integers. """ - if has_prompt_toolkit(): - return tuple(int(x) for x in ptk_version().strip("<>+-=.").split(".")) - else: - return None - - -@functools.lru_cache(1) -def ptk_above_min_supported(): - minimum_required_ptk_version = (1, 0) - return ptk_version_info()[:2] >= minimum_required_ptk_version - - -@functools.lru_cache(1) -def ptk_shell_type(): - """Returns the prompt_toolkit shell type based on the installed version.""" - if ptk_version_info()[:2] < (2, 0): - return "prompt_toolkit1" - else: - return "prompt_toolkit2" - - -@functools.lru_cache(1) -def win_ansi_support(): - if ON_WINDOWS: - try: - from prompt_toolkit.utils import is_windows_vt100_supported, is_conemu_ansi - except ImportError: - return False - return is_conemu_ansi() or is_windows_vt100_supported() - else: - return False - - -@functools.lru_cache(1) -def ptk_below_max_supported(): - ptk_max_version_cutoff = (2, 0) - return ptk_version_info()[:2] < ptk_max_version_cutoff - - -@functools.lru_cache(1) -def best_shell_type(): - if builtins.__xonsh__.env.get("TERM", "") == "dumb": - return "dumb" - elif ON_WINDOWS or has_prompt_toolkit(): - return "prompt_toolkit" - else: - return "readline" - - -@functools.lru_cache(1) -def is_readline_available(): - """Checks if readline is available to import.""" - spec = importlib.util.find_spec("readline") - return spec is not None - - -@lazyobject -def seps(): - """String of all path separators.""" - s = os.path.sep - if os.path.altsep is not None: - s += os.path.altsep - return s - - -def pathsplit(p): - """This is a safe version of os.path.split(), which does not work on input - without a drive. - """ - n = len(p) - while n and p[n - 1] not in seps: - n -= 1 - pre = p[:n] - pre = pre.rstrip(seps) or pre - post = p[n:] - return pre, post - - -def pathbasename(p): - """This is a safe version of os.path.basename(), which does not work on - input without a drive. This version does. - """ - return pathsplit(p)[-1] - - -@lazyobject -def expanduser(): - """Dispatches to the correct platform-dependent expanduser() function.""" - if ON_WINDOWS: - return windows_expanduser - else: - return os.path.expanduser - - -def windows_expanduser(path): - """A Windows-specific expanduser() function for xonsh. This is needed - since os.path.expanduser() does not check on Windows if the user actually - exists. This restricts expanding the '~' if it is not followed by a - separator. That is only '~/' and '~\' are expanded. - """ - if not path.startswith("~"): - return path - elif len(path) < 2 or path[1] in seps: - return os.path.expanduser(path) - else: - return path - - -# termios tc(get|set)attr indexes. 
-IFLAG = 0 -OFLAG = 1 -CFLAG = 2 -LFLAG = 3 -ISPEED = 4 -OSPEED = 5 -CC = 6 - - -# -# Dev release info -# - - -@functools.lru_cache(1) -def githash(): - """Returns a tuple contains two strings: the hash and the date.""" - install_base = os.path.dirname(__file__) - githash_file = "{}/dev.githash".format(install_base) - if not os.path.exists(githash_file): - return None, None - sha = None - date_ = None - try: - with open(githash_file) as f: - sha, date_ = f.read().strip().split("|") - except ValueError: - pass - return sha, date_ - - -# -# Encoding -# - -DEFAULT_ENCODING = sys.getdefaultencoding() -""" Default string encoding. """ - - -if PYTHON_VERSION_INFO < (3, 5, 0): - - class DirEntry: - def __init__(self, directory, name): - self.__path__ = pathlib.Path(directory) / name - self.name = name - self.path = str(self.__path__) - self.is_symlink = self.__path__.is_symlink - - def inode(self): - return os.stat(self.path, follow_symlinks=False).st_ino - - def is_dir(self, *, follow_symlinks=True): - if follow_symlinks: - return self.__path__.is_dir() - else: - return not self.__path__.is_symlink() and self.__path__.is_dir() - - def is_file(self, *, follow_symlinks=True): - if follow_symlinks: - return self.__path__.is_file() - else: - return not self.__path__.is_symlink() and self.__path__.is_file() - - def stat(self, *, follow_symlinks=True): - return os.stat(self.path, follow_symlinks=follow_symlinks) - - def scandir(path): - """ Compatibility layer for `os.scandir` from Python 3.5+. """ - return (DirEntry(path, x) for x in os.listdir(path)) - - -else: - scandir = os.scandir - - -# -# Linux distro -# - - -@functools.lru_cache(1) -def linux_distro(): - """The id of the Linux distribution running on, possibly 'unknown'. - None on non-Linux platforms. - """ - if ON_LINUX: - if distro: - ld = distro.id() - elif PYTHON_VERSION_INFO < (3, 6, 6): - ld = platform.linux_distribution()[0] or "unknown" - elif "-ARCH-" in platform.platform(): - ld = "arch" # that's the only one we need to know for now - else: - ld = "unknown" - else: - ld = None - return ld - - -# -# Windows -# - - -@functools.lru_cache(1) -def git_for_windows_path(): - """Returns the path to git for windows, if available and None otherwise.""" - import winreg - - try: - key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, "SOFTWARE\\GitForWindows") - gfwp, _ = winreg.QueryValueEx(key, "InstallPath") - except FileNotFoundError: - gfwp = None - return gfwp - - -@functools.lru_cache(1) -def windows_bash_command(): - """Determines the command for Bash on windows.""" - # Check that bash is on path otherwise try the default directory - # used by Git for windows - wbc = "bash" - cmd_cache = builtins.__xonsh__.commands_cache - bash_on_path = cmd_cache.lazy_locate_binary("bash", ignore_alias=True) - if bash_on_path: - try: - out = subprocess.check_output( - [bash_on_path, "--version"], - stderr=subprocess.PIPE, - universal_newlines=True, - ) - except subprocess.CalledProcessError: - bash_works = False - else: - # Check if Bash is from the "Windows Subsystem for Linux" (WSL) - # which can't be used by xonsh foreign-shell/completer - bash_works = out and "pc-linux-gnu" not in out.splitlines()[0] - - if bash_works: - wbc = bash_on_path - else: - gfwp = git_for_windows_path() - if gfwp: - bashcmd = os.path.join(gfwp, "bin\\bash.exe") - if os.path.isfile(bashcmd): - wbc = bashcmd - return wbc - - -# -# Environment variables defaults -# - -if ON_WINDOWS: - - class OSEnvironCasePreserving(cabc.MutableMapping): - """ Case-preserving wrapper for os.environ 
-        on Windows. It uses nt.environ to get the correctly cased keys on
-        initialization. It also preserves the case of any variables added
-        after initialization.
-        """
-
-        def __init__(self):
-            import nt
-
-            self._upperkeys = dict((k.upper(), k) for k in nt.environ)
-
-        def _sync(self):
-            """ Ensure that the case-sensitive map of the keys is
-            in sync with os.environ
-            """
-            envkeys = set(os.environ.keys())
-            for key in envkeys.difference(self._upperkeys.values()):
-                self._upperkeys[key.upper()] = key
-            for key in set(self._upperkeys.values()).difference(envkeys):
-                del self._upperkeys[key.upper()]
-
-        def __contains__(self, k):
-            self._sync()
-            return k.upper() in self._upperkeys
-
-        def __len__(self):
-            self._sync()
-            return len(self._upperkeys)
-
-        def __iter__(self):
-            self._sync()
-            return iter(self._upperkeys.values())
-
-        def __getitem__(self, k):
-            self._sync()
-            return os.environ[k]
-
-        def __setitem__(self, k, v):
-            self._sync()
-            self._upperkeys[k.upper()] = k
-            os.environ[k] = v
-
-        def __delitem__(self, k):
-            self._sync()
-            if k.upper() in self._upperkeys:
-                del self._upperkeys[k.upper()]
-            del os.environ[k]
-
-        def getkey_actual_case(self, k):
-            self._sync()
-            return self._upperkeys.get(k.upper())
-
-
-@lazyobject
-def os_environ():
-    """This dispatches to the correct, case-sensitive version of os.environ.
-    This is mainly a problem for Windows. See #2024 for more details.
-    This can probably go away once support for Python v3.5 or v3.6 is
-    dropped.
-    """
-    if ON_WINDOWS:
-        return OSEnvironCasePreserving()
-    else:
-        return os.environ
-
-
-@functools.lru_cache(1)
-def bash_command():
-    """Determines the command for Bash on the current platform."""
-    if ON_WINDOWS:
-        bc = windows_bash_command()
-    else:
-        bc = "bash"
-    return bc
-
-
-@lazyobject
-def BASH_COMPLETIONS_DEFAULT():
-    """A possibly empty tuple with default paths to Bash completions known for
-    the current platform.
- """ - if ON_LINUX or ON_CYGWIN or ON_MSYS: - bcd = ("/usr/share/bash-completion/bash_completion",) - elif ON_DARWIN: - bcd = ( - "/usr/local/share/bash-completion/bash_completion", # v2.x - "/usr/local/etc/bash_completion", - "/opt/local/etc/profile.d/bash_completion.sh", - "/usr/local/etc/bash_completion.d/git-completion.bash", - ) # v1.x - elif ON_WINDOWS and git_for_windows_path(): - bcd = ( - os.path.join( - git_for_windows_path(), "usr\\share\\bash-completion\\bash_completion" - ), - os.path.join( - git_for_windows_path(), - "mingw64\\share\\git\\completion\\" "git-completion.bash", - ), - ) - else: - bcd = () - return bcd - - -@lazyobject -def PATH_DEFAULT(): - if ON_LINUX or ON_CYGWIN or ON_MSYS: - if linux_distro() == "arch": - pd = ( - "/usr/local/sbin", - "/usr/local/bin", - "/usr/bin", - "/usr/bin/site_perl", - "/usr/bin/vendor_perl", - "/usr/bin/core_perl", - ) - else: - pd = ( - os.path.expanduser("~/bin"), - "/usr/local/sbin", - "/usr/local/bin", - "/usr/sbin", - "/usr/bin", - "/sbin", - "/bin", - "/usr/games", - "/usr/local/games", - ) - elif ON_DARWIN: - pd = ("/usr/local/bin", "/usr/bin", "/bin", "/usr/sbin", "/sbin") - elif ON_WINDOWS: - import winreg - - key = winreg.OpenKey( - winreg.HKEY_LOCAL_MACHINE, - r"SYSTEM\CurrentControlSet\Control\Session Manager\Environment", - ) - pd = tuple(winreg.QueryValueEx(key, "Path")[0].split(os.pathsep)) - else: - pd = () - return pd - - -# -# libc -# -@lazyobject -def LIBC(): - """The platform dependent libc implementation.""" - global ctypes - if ON_DARWIN: - import ctypes.util - - libc = ctypes.CDLL(ctypes.util.find_library("c")) - elif ON_CYGWIN: - libc = ctypes.CDLL("cygwin1.dll") - elif ON_MSYS: - libc = ctypes.CDLL("msys-2.0.dll") - elif ON_BSD: - try: - libc = ctypes.CDLL("libc.so") - except AttributeError: - libc = None - except OSError: - # OS X; can't use ctypes.util.find_library because that creates - # a new process on Linux, which is undesirable. - try: - libc = ctypes.CDLL("libc.dylib") - except OSError: - libc = None - elif ON_POSIX: - try: - libc = ctypes.CDLL("libc.so") - except AttributeError: - libc = None - except OSError: - # Debian and derivatives do the wrong thing because /usr/lib/libc.so - # is a GNU ld script rather than an ELF object. To get around this, we - # have to be more specific. - # We don't want to use ctypes.util.find_library because that creates a - # new process on Linux. We also don't want to try too hard because at - # this point we're already pretty sure this isn't Linux. - try: - libc = ctypes.CDLL("libc.so.6") - except OSError: - libc = None - if not hasattr(libc, "sysinfo"): - # Not Linux. - libc = None - elif ON_WINDOWS: - if hasattr(ctypes, "windll") and hasattr(ctypes.windll, "kernel32"): - libc = ctypes.windll.kernel32 - else: - try: - # Windows CE uses the cdecl calling convention. 
- libc = ctypes.CDLL("coredll.lib") - except (AttributeError, OSError): - libc = None - elif ON_BEOS: - libc = ctypes.CDLL("libroot.so") - else: - libc = None - return libc diff --git a/xonsh/ply/.gitignore b/xonsh/ply/.gitignore deleted file mode 100644 index bd46d6e..0000000 --- a/xonsh/ply/.gitignore +++ /dev/null @@ -1,9 +0,0 @@ -*.pyc -*.pyo -__pycache__ -*.out -*.dif -*~ -/dist -/build -/*.egg-info diff --git a/xonsh/ply/.travis.yml b/xonsh/ply/.travis.yml deleted file mode 100644 index b33e49b..0000000 --- a/xonsh/ply/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: python -python: - - "2.6" - - "2.7" - - "3.3" - - "3.4" - - "3.5" - - "3.6" -install: true -script: "cd test && python testlex.py && python testyacc.py" diff --git a/xonsh/ply/CHANGES b/xonsh/ply/CHANGES deleted file mode 100644 index 9a90a55..0000000 --- a/xonsh/ply/CHANGES +++ /dev/null @@ -1,1426 +0,0 @@ -Current Version ---------------- -IMPORTANT NOTE (2018-12-22): PLY is no longer be released in any -package-installable format. If you want to use the latest version, you -need to COPY the contents of the ply/ directory into your own project -and use it. Although PLY is no longer distributed as a package, it is -maintained as a mature library. No new features are planned, but -issues and pull requests for bugs are still welcome. Any changes to the -software will be noted here. - -01/19/19 Some improvements to the preprocessor module contributed by - Rob Reilink. Issue #195 fixes the evaluation of expressions - such as #if a != b. Issue #196 fixes some some issues - related to the lack of a specified Unicode encoding on reading - text files. - -12/22/18 Incorporated some minor changes to eliminate warning messages - related to open() calls. - -Version 3.11 ---------------------- -02/15/18 beazley - Fixed some minor bugs related to re flags and token order. - Github pull requests #151 and #153. - -02/15/18 beazley - Added a set_lexpos() method to grammar symbols. Github issue #148. - - -04/13/17 beazley - Mostly minor bug fixes and small code cleanups. - -Version 3.10 ---------------------- -01/31/17: beazley - Changed grammar signature computation to not involve hashing - functions. Parts are just combined into a big string. - -10/07/16: beazley - Fixed Issue #101: Incorrect shift-reduce conflict resolution with - precedence specifier. - - PLY was incorrectly resolving shift-reduce conflicts in certain - cases. For example, in the example/calc/calc.py example, you - could trigger it doing this: - - calc > -3 - 4 - 1 (correct answer should be -7) - calc > - - Issue and suggested patch contributed by https://github.com/RomaVis - -Version 3.9 ---------------------- -08/30/16: beazley - Exposed the parser state number as the parser.state attribute - in productions and error functions. For example: - - def p_somerule(p): - ''' - rule : A B C - ''' - print('State:', p.parser.state) - - May address issue #65 (publish current state in error callback). - -08/30/16: beazley - Fixed Issue #88. Python3 compatibility with ply/cpp. - -08/30/16: beazley - Fixed Issue #93. Ply can crash if SyntaxError is raised inside - a production. Not actually sure if the original implementation - worked as documented at all. Yacc has been modified to follow - the spec as outlined in the CHANGES noted for 11/27/07 below. - -08/30/16: beazley - Fixed Issue #97. Failure with code validation when the original - source files aren't present. Validation step now ignores - the missing file. - -08/30/16: beazley - Minor fixes to version numbers. 
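To make the parser.state note in the Version 3.9 entries above concrete, here is a hedged, minimal sketch (the NUMBER/PLUS grammar, token names, and input string are placeholders; assumes PLY 3.9+ where the state number is exposed):

    import ply.lex as lex
    import ply.yacc as yacc

    tokens = ('NUMBER', 'PLUS')
    t_PLUS = r'\+'
    t_ignore = ' '

    def t_NUMBER(t):
        r'\d+'
        t.value = int(t.value)
        return t

    def t_error(t):
        t.lexer.skip(1)

    precedence = (('left', 'PLUS'),)

    def p_expr_plus(p):
        'expr : expr PLUS expr'
        # The current LR state number is exposed on the parser (PLY 3.9+)
        print('State:', p.parser.state)
        p[0] = p[1] + p[3]

    def p_expr_number(p):
        'expr : NUMBER'
        p[0] = p[1]

    def p_error(p):
        print('Syntax error')

    lex.lex()
    parser = yacc.yacc()
    print(parser.parse('1 + 2 + 3'))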
- -Version 3.8 ---------------------- -10/02/15: beazley - Fixed issues related to Python 3.5. Patch contributed by Barry Warsaw. - -Version 3.7 ---------------------- -08/25/15: beazley - Fixed problems when reading table files from pickled data. - -05/07/15: beazley - Fixed regression in handling of table modules if specified as module - objects. See https://github.com/dabeaz/ply/issues/63 - -Version 3.6 ---------------------- -04/25/15: beazley - If PLY is unable to create the 'parser.out' or 'parsetab.py' files due - to permission issues, it now just issues a warning message and - continues to operate. This could happen if a module using PLY - is installed in a funny way where tables have to be regenerated, but - for whatever reason, the user doesn't have write permission on - the directory where PLY wants to put them. - -04/24/15: beazley - Fixed some issues related to use of packages and table file - modules. Just to emphasize, PLY now generates its special - files such as 'parsetab.py' and 'lextab.py' in the *SAME* - directory as the source file that uses lex() and yacc(). - - If for some reason, you want to change the name of the table - module, use the tabmodule and lextab options: - - lexer = lex.lex(lextab='spamlextab') - parser = yacc.yacc(tabmodule='spamparsetab') - - If you specify a simple name as shown, the module will still be - created in the same directory as the file invoking lex() or yacc(). - If you want the table files to be placed into a different package, - then give a fully qualified package name. For example: - - lexer = lex.lex(lextab='pkgname.files.lextab') - parser = yacc.yacc(tabmodule='pkgname.files.parsetab') - - For this to work, 'pkgname.files' must already exist as a valid - Python package (i.e., the directories must already exist and be - set up with the proper __init__.py files, etc.). - -Version 3.5 ---------------------- -04/21/15: beazley - Added support for defaulted_states in the parser. A - defaulted_state is a state where the only legal action is a - reduction of a single grammar rule across all valid input - tokens. For such states, the rule is reduced and the - reading of the next lookahead token is delayed until it is - actually needed at a later point in time. - - This delay in consuming the next lookahead token is a - potentially important feature in advanced parsing - applications that require tight interaction between the - lexer and the parser. For example, a grammar rule change - modify the lexer state upon reduction and have such changes - take effect before the next input token is read. - - *** POTENTIAL INCOMPATIBILITY *** - One potential danger of defaulted_states is that syntax - errors might be deferred to a a later point of processing - than where they were detected in past versions of PLY. - Thus, it's possible that your error handling could change - slightly on the same inputs. defaulted_states do not change - the overall parsing of the input (i.e., the same grammar is - accepted). - - If for some reason, you need to disable defaulted states, - you can do this: - - parser = yacc.yacc() - parser.defaulted_states = {} - -04/21/15: beazley - Fixed debug logging in the parser. It wasn't properly reporting goto states - on grammar rule reductions. - -04/20/15: beazley - Added actions to be defined to character literals (Issue #32). 
For example: - - literals = [ '{', '}' ] - - def t_lbrace(t): - r'\{' - # Some action - t.type = '{' - return t - - def t_rbrace(t): - r'\}' - # Some action - t.type = '}' - return t - -04/19/15: beazley - Import of the 'parsetab.py' file is now constrained to only consider the - directory specified by the outputdir argument to yacc(). If not supplied, - the import will only consider the directory in which the grammar is defined. - This should greatly reduce problems with the wrong parsetab.py file being - imported by mistake. For example, if it's found somewhere else on the path - by accident. - - *** POTENTIAL INCOMPATIBILITY *** It's possible that this might break some - packaging/deployment setup if PLY was instructed to place its parsetab.py - in a different location. You'll have to specify a proper outputdir= argument - to yacc() to fix this if needed. - -04/19/15: beazley - Changed default output directory to be the same as that in which the - yacc grammar is defined. If your grammar is in a file 'calc.py', - then the parsetab.py and parser.out files should be generated in the - same directory as that file. The destination directory can be changed - using the outputdir= argument to yacc(). - -04/19/15: beazley - Changed the parsetab.py file signature slightly so that the parsetab won't - regenerate if created on a different major version of Python (ie., a - parsetab created on Python 2 will work with Python 3). - -04/16/15: beazley - Fixed Issue #44 call_errorfunc() should return the result of errorfunc() - -04/16/15: beazley - Support for versions of Python <2.7 is officially dropped. PLY may work, but - the unit tests requires Python 2.7 or newer. - -04/16/15: beazley - Fixed bug related to calling yacc(start=...). PLY wasn't regenerating the - table file correctly for this case. - -04/16/15: beazley - Added skipped tests for PyPy and Java. Related to use of Python's -O option. - -05/29/13: beazley - Added filter to make unit tests pass under 'python -3'. - Reported by Neil Muller. - -05/29/13: beazley - Fixed CPP_INTEGER regex in ply/cpp.py (Issue 21). - Reported by @vbraun. - -05/29/13: beazley - Fixed yacc validation bugs when from __future__ import unicode_literals - is being used. Reported by Kenn Knowles. - -05/29/13: beazley - Added support for Travis-CI. Contributed by Kenn Knowles. - -05/29/13: beazley - Added a .gitignore file. Suggested by Kenn Knowles. - -05/29/13: beazley - Fixed validation problems for source files that include a - different source code encoding specifier. Fix relies on - the inspect module. Should work on Python 2.6 and newer. - Not sure about older versions of Python. - Contributed by Michael Droettboom - -05/21/13: beazley - Fixed unit tests for yacc to eliminate random failures due to dict hash value - randomization in Python 3.3 - Reported by Arfrever - -10/15/12: beazley - Fixed comment whitespace processing bugs in ply/cpp.py. - Reported by Alexei Pososin. - -10/15/12: beazley - Fixed token names in ply/ctokens.py to match rule names. - Reported by Alexei Pososin. - -04/26/12: beazley - Changes to functions available in panic mode error recover. In previous versions - of PLY, the following global functions were available for use in the p_error() rule: - - yacc.errok() # Reset error state - yacc.token() # Get the next token - yacc.restart() # Reset the parsing stack - - The use of global variables was problematic for code involving multiple parsers - and frankly was a poor design overall. 
-          These functions have been moved to methods of the parser instance
-          created by the yacc() function. You should write code like this:
-
-              def p_error(p):
-                  ...
-                  parser.errok()
-
-              parser = yacc.yacc()
-
-          *** POTENTIAL INCOMPATIBILITY *** The original global functions
-          now issue a DeprecationWarning.
-
-04/19/12: beazley
-          Fixed some problems with line and position tracking and the use of
-          error symbols. If you have a grammar rule involving an error rule
-          like this:
-
-              def p_assignment_bad(p):
-                  '''assignment : location EQUALS error SEMI'''
-                  ...
-
-          You can now do line and position tracking on the error token. For
-          example:
-
-              def p_assignment_bad(p):
-                  '''assignment : location EQUALS error SEMI'''
-                  start_line = p.lineno(3)
-                  start_pos = p.lexpos(3)
-
-          If the tracking=True option is supplied to parse(), you can
-          additionally get spans:
-
-              def p_assignment_bad(p):
-                  '''assignment : location EQUALS error SEMI'''
-                  start_line, end_line = p.linespan(3)
-                  start_pos, end_pos = p.lexspan(3)
-
-          Note that error handling is still a hairy thing in PLY. This won't
-          work unless your lexer is providing accurate information. Please
-          report bugs. Suggested by a bug reported by Davis Herring.
-
-04/18/12: beazley
-          Change to doc string handling in the lex module. Regex patterns
-          are now first pulled from a function's .regex attribute. If that
-          doesn't exist, then .doc is checked as a fallback. The @TOKEN
-          decorator now sets the .regex attribute of a function instead of
-          its doc string. Change suggested by Kristoffer Ellersgaard Koch.
-
-04/18/12: beazley
-          Fixed issue #1: Fixed _tabversion. It should use __tabversion__
-          instead of __version__. Reported by Daniele Tricoli.
-
-04/18/12: beazley
-          Fixed issue #8: Literals empty list causes IndexError.
-          Reported by Walter Nissen.
-
-04/18/12: beazley
-          Fixed issue #12: Typo in code snippet in documentation.
-          Reported by florianschanda.
-
-04/18/12: beazley
-          Fixed issue #10: Correctly escape t_XOREQUAL pattern.
-          Reported by Andy Kittner.
-
-Version 3.4
----------------------
-02/17/11: beazley
-          Minor patch to make cpp.py compatible with Python 3. Note: This is
-          an experimental file not currently used by the rest of PLY.
-
-02/17/11: beazley
-          Fixed setup.py trove classifiers to properly list PLY as
-          Python 3 compatible.
-
-01/02/11: beazley
-          Migration of repository to github.
-
-Version 3.3
------------------------------
-08/25/09: beazley
-          Fixed issue 15 related to the set_lineno() method in yacc.
-          Reported by mdsherry.
-
-08/25/09: beazley
-          Fixed a bug related to regular expression compilation flags not
-          being properly stored in lextab.py files created by the lexer when
-          running in optimize mode. Reported by Bruce Frederiksen.
-
-
-Version 3.2
------------------------------
-03/24/09: beazley
-          Added an extra check to not print duplicated warning messages
-          about reduce/reduce conflicts.
-
-03/24/09: beazley
-          Switched PLY over to a BSD-license.
-
-03/23/09: beazley
-          Performance optimization. Discovered a few places to make
-          speedups in LR table generation.
-
-03/23/09: beazley
-          New warning message. PLY now warns about rules never reduced due
-          to reduce/reduce conflicts. Suggested by Bruce Frederiksen.
-
-03/23/09: beazley
-          Some clean-up of warning messages related to reduce/reduce errors.
-
-03/23/09: beazley
-          Added a new picklefile option to yacc() to write the parsing
-          tables to a filename using the pickle module.
-          Here is how it works:
-
-              yacc(picklefile="parsetab.p")
-
-          This option can be used if the normal parsetab.py file is
-          extremely large. For example, on jython, it is impossible to read
-          parsing tables if the parsetab.py exceeds a certain threshold.
-
-          The filename supplied to the picklefile option is opened relative
-          to the current working directory of the Python interpreter. If you
-          need to refer to the file elsewhere, you will need to supply an
-          absolute or relative path.
-
-          For maximum portability, the pickle file is written using
-          protocol 0.
-
-03/13/09: beazley
-          Fixed a bug in parser.out generation where the rule numbers
-          were off by one.
-
-03/13/09: beazley
-          Fixed a string formatting bug with one of the error messages.
-          Reported by Richard Reitmeyer
-
-Version 3.1
------------------------------
-02/28/09: beazley
-          Fixed broken start argument to yacc(). PLY-3.0 broke this
-          feature by accident.
-
-02/28/09: beazley
-          Fixed debugging output. yacc() no longer reports shift/reduce or
-          reduce/reduce conflicts if debugging is turned off. This restores
-          similar behavior in PLY-2.5. Reported by Andrew Waters.
-
-Version 3.0
------------------------------
-02/03/09: beazley
-          Fixed missing lexer attribute on certain tokens when invoking the
-          parser p_error() function. Reported by Bart Whiteley.
-
-02/02/09: beazley
-          The lex() command now does all error-reporting and diagnostics
-          using the logging module interface. Pass in a Logger object using
-          the errorlog parameter to specify a different logger.
-
-02/02/09: beazley
-          Refactored ply.lex to use a more object-oriented and organized
-          approach to collecting lexer information.
-
-02/01/09: beazley
-          Removed the nowarn option from lex(). All output is controlled by
-          passing in a logger object. Just pass in a logger with a high
-          level setting to suppress output. This argument was never
-          documented to begin with so hopefully no one was relying upon it.
-
-02/01/09: beazley
-          Discovered and removed a dead if-statement in the lexer. This
-          resulted in a 6-7% speedup in lexing when I tested it.
-
-01/13/09: beazley
-          Minor change to the procedure for signalling a syntax error in a
-          production rule. A normal SyntaxError exception should be raised
-          instead of yacc.SyntaxError.
-
-01/13/09: beazley
-          Added a new method p.set_lineno(n,lineno) that can be used to set
-          the line number of symbol n in grammar rules. This simplifies
-          manual tracking of line numbers.
-
-01/11/09: beazley
-          Vastly improved debugging support for yacc.parse(). Instead of
-          passing debug as an integer, you can supply a Logging object (see
-          the logging module). Messages will be generated at the ERROR,
-          INFO, and DEBUG logging levels, each level providing progressively
-          more information. The debugging trace also shows states, grammar
-          rule, values passed into grammar rules, and the result of each
-          reduction.
-
-01/09/09: beazley
-          The yacc() command now does all error-reporting and diagnostics
-          using the interface of the logging module. Use the errorlog
-          parameter to specify a logging object for error messages. Use the
-          debuglog parameter to specify a logging object for the
-          'parser.out' output.
-
-01/09/09: beazley
-          *HUGE* refactoring of the ply.yacc() implementation. The
-          high-level user interface is backwards compatible, but the
-          internals are completely reorganized into classes. No more global
-          variables. The internals are also more extensible.
-          For example, you can use the classes to construct a LALR(1) parser
-          in an entirely different manner than what is currently the case.
-          Documentation is forthcoming.
-
-01/07/09: beazley
-          Various cleanup and refactoring of yacc internals.
-
-01/06/09: beazley
-          Fixed a bug with precedence assignment. yacc was assigning the
-          precedence to each rule based on the left-most token, when in
-          fact, it should have been using the right-most token. Reported by
-          Bruce Frederiksen.
-
-11/27/08: beazley
-          Numerous changes to support Python 3.0 including removal of
-          deprecated statements (e.g., has_key) and the addition of
-          compatibility code to emulate features from Python 2 that have
-          been removed, but which are needed. Fixed the unit testing suite
-          to work with Python 3.0. The code should be backwards compatible
-          with Python 2.
-
-11/26/08: beazley
-          Loosened the rules on what kind of objects can be passed in as
-          the "module" parameter to lex() and yacc(). Previously, you could
-          only use a module or an instance. Now, PLY just uses dir() to get
-          a list of symbols on whatever the object is without regard for its
-          type.
-
-11/26/08: beazley
-          Changed all except: statements to be compatible with
-          Python 2.x/3.x syntax.
-
-11/26/08: beazley
-          Changed all raise Exception, value statements to
-          raise Exception(value) for forward compatibility.
-
-11/26/08: beazley
-          Removed all print statements from lex and yacc, using sys.stdout
-          and sys.stderr directly. Preparation for Python 3.0 support.
-
-11/04/08: beazley
-          Fixed a bug with referring to symbols on the parsing stack using
-          negative indices.
-
-05/29/08: beazley
-          Completely revamped the testing system to use the unittest module
-          for everything. Added additional tests to cover new
-          errors/warnings.
-
-Version 2.5
------------------------------
-05/28/08: beazley
-          Fixed a bug with writing lex-tables in optimized mode and start
-          states. Reported by Kevin Henry.
-
-Version 2.4
------------------------------
-05/04/08: beazley
-          A version number is now embedded in the table file signature so
-          that yacc can more gracefully accommodate changes to the output
-          format in the future.
-
-05/04/08: beazley
-          Removed undocumented .pushback() method on grammar productions.
-          I'm not sure this ever worked and can't recall ever using it.
-          Might have been an abandoned idea that never really got fleshed
-          out. This feature was never described or tested so removing it is
-          hopefully harmless.
-
-05/04/08: beazley
-          Added extra error checking to yacc() to detect precedence rules
-          defined for undefined terminal symbols. This allows yacc() to
-          detect a potential problem that can be really tricky to debug if
-          no warning message or error message is generated about it.
-
-05/04/08: beazley
-          lex() now has an outputdir that can specify the output directory
-          for tables when running in optimize mode. For example:
-
-              lexer = lex.lex(optimize=True, lextab="ltab", outputdir="foo/bar")
-
-          The behavior of specifying a table module and output directory is
-          now more aligned with the behavior of yacc().
-
-05/04/08: beazley
-          [Issue 9]
-          Fixed filename bug when specifying the modulename in lex() and
-          yacc(). If you specified options such as the following:
-
-              parser = yacc.yacc(tabmodule="foo.bar.parsetab",outputdir="foo/bar")
-
-          yacc would create a file "foo.bar.parsetab.py" in the given
-          directory. Now, it simply generates a file "parsetab.py" in that
-          directory. Bug reported by cptbinho.
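As a minimal runnable sketch of the table-file options described in the entries above (the token set, grammar rule, and the "foo/bar" directory are illustrative placeholders; the output directory must already exist):

    import ply.lex as lex
    import ply.yacc as yacc

    tokens = ('NAME',)
    t_NAME = r'[A-Za-z_][A-Za-z0-9_]*'
    t_ignore = ' '

    def t_error(t):
        t.lexer.skip(1)

    def p_start(p):
        'start : NAME'
        p[0] = p[1]

    # Writes ltab.py and ptab.py into foo/bar instead of the current directory.
    lexer = lex.lex(optimize=True, lextab='ltab', outputdir='foo/bar')
    parser = yacc.yacc(tabmodule='ptab', outputdir='foo/bar')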
-
-05/04/08: beazley
-          Slight modification to lex() and yacc() to allow their table files
-          to be loaded from a previously loaded module. This might make it
-          easier to load the parsing tables from a complicated package
-          structure. For example:
-
-              import foo.bar.spam.parsetab as parsetab
-              parser = yacc.yacc(tabmodule=parsetab)
-
-          Note: lex and yacc will never regenerate the table file if used in
-          this form---you will get a warning message instead.
-          This idea suggested by Brian Clapper.
-
-
-04/28/08: beazley
-          Fixed a bug with p_error() functions being picked up correctly
-          when running in yacc(optimize=1) mode. Patch contributed by
-          Bart Whiteley.
-
-02/28/08: beazley
-          Fixed a bug with 'nonassoc' precedence rules. Basically the
-          non-precedence was being ignored and not producing the correct
-          run-time behavior in the parser.
-
-02/16/08: beazley
-          Slight relaxation of what the input() method to a lexer will
-          accept as a string. Instead of testing the input to see if the
-          input is a string or unicode string, it checks to see if the input
-          object looks like it contains string data. This change makes it
-          possible to pass string-like objects in as input. For example,
-          the object returned by mmap.
-
-              import mmap, os
-              data = mmap.mmap(os.open(filename,os.O_RDONLY),
-                               os.path.getsize(filename),
-                               access=mmap.ACCESS_READ)
-              lexer.input(data)
-
-
-11/29/07: beazley
-          Modification of ply.lex to allow token functions to be aliased.
-          This is subtle, but it makes it easier to create libraries and to
-          reuse token specifications. For example, suppose you defined a
-          function like this:
-
-              def number(t):
-                  r'\d+'
-                  t.value = int(t.value)
-                  return t
-
-          This change would allow you to define a token rule as follows:
-
-              t_NUMBER = number
-
-          In this case, the token type will be set to 'NUMBER' and use
-          the associated number() function to process tokens.
-
-11/28/07: beazley
-          Slight modification to lex and yacc to grab symbols from both the
-          local and global dictionaries of the caller. This modification
-          allows lexers and parsers to be defined using inner functions and
-          closures.
-
-11/28/07: beazley
-          Performance optimization: The lexer.lexmatch and t.lexer
-          attributes are no longer set for lexer tokens that are not defined
-          by functions. The only normal use of these attributes would be in
-          lexer rules that need to perform some kind of special processing.
-          Thus, it doesn't make any sense to set them on every token.
-
-          *** POTENTIAL INCOMPATIBILITY *** This might break code that is
-          mucking around with internal lexer state in some sort of magical
-          way.
-
-11/27/07: beazley
-          Added the ability to put the parser into error-handling mode from
-          within a normal production. To do this, simply raise a
-          yacc.SyntaxError exception like this:
-
-              def p_some_production(p):
-                  'some_production : prod1 prod2'
-                  ...
-                  raise yacc.SyntaxError      # Signal an error
-
-          A number of things happen after this occurs:
-
-          - The last symbol shifted onto the symbol stack is discarded and
-            the parser state is backed up to what it was before the rule
-            reduction.
-
-          - The current lookahead symbol is saved and replaced by the
-            'error' symbol.
-
-          - The parser enters error recovery mode where it tries to either
-            reduce the 'error' rule or it starts discarding items off of the
-            stack until the parser resets.
-
-          When an error is manually set, the parser does *not* call
-          the p_error() function (if any is defined).
-          *** NEW FEATURE *** Suggested on the mailing list
-
-11/27/07: beazley
-          Fixed structure bug in examples/ansic. Reported by Dion Blazakis.
-
-11/27/07: beazley
-          Fixed a bug in the lexer related to start conditions and ignored
-          token rules. If a rule was defined that changed state, but
-          returned no token, the lexer could be left in an inconsistent
-          state. Reported by
-
-11/27/07: beazley
-          Modified setup.py to support Python Eggs. Patch contributed by
-          Simon Cross.
-
-11/09/07: beazley
-          Fixed a bug in error handling in yacc. If a syntax error occurred
-          and the parser rolled the entire parse stack back, the parser
-          would be left in an inconsistent state that would cause it to
-          trigger incorrect actions on subsequent input. Reported by Ton
-          Biegstraaten, Justin King, and others.
-
-11/09/07: beazley
-          Fixed a bug when passing empty input strings to yacc.parse().
-          This would result in an error message about "No input given".
-          Reported by Andrew Dalke.
-
-Version 2.3
------------------------------
-02/20/07: beazley
-          Fixed a bug with character literals if the literal '.' appeared
-          as the last symbol of a grammar rule. Reported by Ales Smrcka.
-
-02/19/07: beazley
-          Warning messages are now redirected to stderr instead of being
-          printed to standard output.
-
-02/19/07: beazley
-          Added a warning message to lex.py if it detects a literal
-          backslash character inside the t_ignore declaration. This is to
-          help avoid problems that might occur if someone accidentally
-          defines t_ignore as a Python raw string. For example:
-
-              t_ignore = r' \t'
-
-          The idea for this is from an email I received from David Cimimi
-          who reported bizarre behavior in lexing as a result of defining
-          t_ignore as a raw string by accident.
-
-02/18/07: beazley
-          Performance improvements. Made some changes to the internal table
-          organization and LR parser to improve parsing performance.
-
-02/18/07: beazley
-          Automatic tracking of line number and position information must
-          now be enabled by a special flag to parse(). For example:
-
-              yacc.parse(data,tracking=True)
-
-          In many applications, it's just not that important to have the
-          parser automatically track all line numbers. By making this an
-          optional feature, it allows the parser to run significantly faster
-          (more than a 20% speed increase in many cases). Note: positional
-          information is always available for raw tokens---this change only
-          applies to positional information associated with nonterminal
-          grammar symbols.
-          *** POTENTIAL INCOMPATIBILITY ***
-
-02/18/07: beazley
-          Yacc no longer supports extended slices of grammar productions.
-          However, it does support regular slices. For example:
-
-              def p_foo(p):
-                  '''foo: a b c d e'''
-                  p[0] = p[1:3]
-
-          This change is a performance improvement to the parser--it
-          streamlines normal access to the grammar values since slices are
-          now handled in a __getslice__() method as opposed to
-          __getitem__().
-
-02/12/07: beazley
-          Fixed a bug in the handling of token names when combined with
-          start conditions. Bug reported by Todd O'Bryan.
-
-Version 2.2
-------------------------------
-11/01/06: beazley
-          Added lexpos() and lexspan() methods to grammar symbols. These
-          mirror the same functionality of lineno() and linespan(). For
-          example:
-
-              def p_expr(p):
-                  'expr : expr PLUS expr'
-                  p.lexpos(1)                # Lexing position of left-hand expression
-                  p.lexpos(2)                # Lexing position of PLUS
-                  start,end = p.lexspan(3)   # Lexing range of right-hand expression
-
-11/01/06: beazley
-          Minor change to error handling.
The recommended way to skip characters - in the input is to use t.lexer.skip() as shown here: - - def t_error(t): - print "Illegal character '%s'" % t.value[0] - t.lexer.skip(1) - - The old approach of just using t.skip(1) will still work, but won't - be documented. - -10/31/06: beazley - Discarded tokens can now be specified as simple strings instead of - functions. To do this, simply include the text "ignore_" in the - token declaration. For example: - - t_ignore_cppcomment = r'//.*' - - Previously, this had to be done with a function. For example: - - def t_ignore_cppcomment(t): - r'//.*' - pass - - If start conditions/states are being used, state names should appear - before the "ignore_" text. - -10/19/06: beazley - The Lex module now provides support for flex-style start conditions - as described at http://www.gnu.org/software/flex/manual/html_chapter/flex_11.html. - Please refer to this document to understand this change note. Refer to - the PLY documentation for PLY-specific explanation of how this works. - - To use start conditions, you first need to declare a set of states in - your lexer file: - - states = ( - ('foo','exclusive'), - ('bar','inclusive') - ) - - This serves the same role as the %s and %x specifiers in flex. - - One a state has been declared, tokens for that state can be - declared by defining rules of the form t_state_TOK. For example: - - t_PLUS = '\+' # Rule defined in INITIAL state - t_foo_NUM = '\d+' # Rule defined in foo state - t_bar_NUM = '\d+' # Rule defined in bar state - - t_foo_bar_NUM = '\d+' # Rule defined in both foo and bar - t_ANY_NUM = '\d+' # Rule defined in all states - - In addition to defining tokens for each state, the t_ignore and t_error - specifications can be customized for specific states. For example: - - t_foo_ignore = " " # Ignored characters for foo state - def t_bar_error(t): - # Handle errors in bar state - - With token rules, the following methods can be used to change states - - def t_TOKNAME(t): - t.lexer.begin('foo') # Begin state 'foo' - t.lexer.push_state('foo') # Begin state 'foo', push old state - # onto a stack - t.lexer.pop_state() # Restore previous state - t.lexer.current_state() # Returns name of current state - - These methods mirror the BEGIN(), yy_push_state(), yy_pop_state(), and - yy_top_state() functions in flex. - - The use of start states can be used as one way to write sub-lexers. - For example, the lexer or parser might instruct the lexer to start - generating a different set of tokens depending on the context. - - example/yply/ylex.py shows the use of start states to grab C/C++ - code fragments out of traditional yacc specification files. - - *** NEW FEATURE *** Suggested by Daniel Larraz with whom I also - discussed various aspects of the design. - -10/19/06: beazley - Minor change to the way in which yacc.py was reporting shift/reduce - conflicts. Although the underlying LALR(1) algorithm was correct, - PLY was under-reporting the number of conflicts compared to yacc/bison - when precedence rules were in effect. This change should make PLY - report the same number of conflicts as yacc. - -10/19/06: beazley - Modified yacc so that grammar rules could also include the '-' - character. For example: - - def p_expr_list(p): - 'expression-list : expression-list expression' - - Suggested by Oldrich Jedlicka. - -10/18/06: beazley - Attribute lexer.lexmatch added so that token rules can access the re - match object that was generated. 
For example: - - def t_FOO(t): - r'some regex' - m = t.lexer.lexmatch - # Do something with m - - - This may be useful if you want to access named groups specified within - the regex for a specific token. Suggested by Oldrich Jedlicka. - -10/16/06: beazley - Changed the error message that results if an illegal character - is encountered and no default error function is defined in lex. - The exception is now more informative about the actual cause of - the error. - -Version 2.1 ------------------------------- -10/02/06: beazley - The last Lexer object built by lex() can be found in lex.lexer. - The last Parser object built by yacc() can be found in yacc.parser. - -10/02/06: beazley - New example added: examples/yply - - This example uses PLY to convert Unix-yacc specification files to - PLY programs with the same grammar. This may be useful if you - want to convert a grammar from bison/yacc to use with PLY. - -10/02/06: beazley - Added support for a start symbol to be specified in the yacc - input file itself. Just do this: - - start = 'name' - - where 'name' matches some grammar rule. For example: - - def p_name(p): - 'name : A B C' - ... - - This mirrors the functionality of the yacc %start specifier. - -09/30/06: beazley - Some new examples added.: - - examples/GardenSnake : A simple indentation based language similar - to Python. Shows how you might handle - whitespace. Contributed by Andrew Dalke. - - examples/BASIC : An implementation of 1964 Dartmouth BASIC. - Contributed by Dave against his better - judgement. - -09/28/06: beazley - Minor patch to allow named groups to be used in lex regular - expression rules. For example: - - t_QSTRING = r'''(?P['"]).*?(?P=quote)''' - - Patch submitted by Adam Ring. - -09/28/06: beazley - LALR(1) is now the default parsing method. To use SLR, use - yacc.yacc(method="SLR"). Note: there is no performance impact - on parsing when using LALR(1) instead of SLR. However, constructing - the parsing tables will take a little longer. - -09/26/06: beazley - Change to line number tracking. To modify line numbers, modify - the line number of the lexer itself. For example: - - def t_NEWLINE(t): - r'\n' - t.lexer.lineno += 1 - - This modification is both cleanup and a performance optimization. - In past versions, lex was monitoring every token for changes in - the line number. This extra processing is unnecessary for a vast - majority of tokens. Thus, this new approach cleans it up a bit. - - *** POTENTIAL INCOMPATIBILITY *** - You will need to change code in your lexer that updates the line - number. For example, "t.lineno += 1" becomes "t.lexer.lineno += 1" - -09/26/06: beazley - Added the lexing position to tokens as an attribute lexpos. This - is the raw index into the input text at which a token appears. - This information can be used to compute column numbers and other - details (e.g., scan backwards from lexpos to the first newline - to get a column position). - -09/25/06: beazley - Changed the name of the __copy__() method on the Lexer class - to clone(). This is used to clone a Lexer object (e.g., if - you're running different lexers at the same time). - -09/21/06: beazley - Limitations related to the use of the re module have been eliminated. - Several users reported problems with regular expressions exceeding - more than 100 named groups. To solve this, lex.py is now capable - of automatically splitting its master regular regular expression into - smaller expressions as needed. 
This should, in theory, make it - possible to specify an arbitrarily large number of tokens. - -09/21/06: beazley - Improved error checking in lex.py. Rules that match the empty string - are now rejected (otherwise they cause the lexer to enter an infinite - loop). An extra check for rules containing '#' has also been added. - Since lex compiles regular expressions in verbose mode, '#' is interpreted - as a regex comment, it is critical to use '\#' instead. - -09/18/06: beazley - Added a @TOKEN decorator function to lex.py that can be used to - define token rules where the documentation string might be computed - in some way. - - digit = r'([0-9])' - nondigit = r'([_A-Za-z])' - identifier = r'(' + nondigit + r'(' + digit + r'|' + nondigit + r')*)' - - from ply.lex import TOKEN - - @TOKEN(identifier) - def t_ID(t): - # Do whatever - - The @TOKEN decorator merely sets the documentation string of the - associated token function as needed for lex to work. - - Note: An alternative solution is the following: - - def t_ID(t): - # Do whatever - - t_ID.__doc__ = identifier - - Note: Decorators require the use of Python 2.4 or later. If compatibility - with old versions is needed, use the latter solution. - - The need for this feature was suggested by Cem Karan. - -09/14/06: beazley - Support for single-character literal tokens has been added to yacc. - These literals must be enclosed in quotes. For example: - - def p_expr(p): - "expr : expr '+' expr" - ... - - def p_expr(p): - 'expr : expr "-" expr' - ... - - In addition to this, it is necessary to tell the lexer module about - literal characters. This is done by defining the variable 'literals' - as a list of characters. This should be defined in the module that - invokes the lex.lex() function. For example: - - literals = ['+','-','*','/','(',')','='] - - or simply - - literals = '+=*/()=' - - It is important to note that literals can only be a single character. - When the lexer fails to match a token using its normal regular expression - rules, it will check the current character against the literal list. - If found, it will be returned with a token type set to match the literal - character. Otherwise, an illegal character will be signalled. - - -09/14/06: beazley - Modified PLY to install itself as a proper Python package called 'ply'. - This will make it a little more friendly to other modules. This - changes the usage of PLY only slightly. Just do this to import the - modules - - import ply.lex as lex - import ply.yacc as yacc - - Alternatively, you can do this: - - from ply import * - - Which imports both the lex and yacc modules. - Change suggested by Lee June. - -09/13/06: beazley - Changed the handling of negative indices when used in production rules. - A negative production index now accesses already parsed symbols on the - parsing stack. For example, - - def p_foo(p): - "foo: A B C D" - print p[1] # Value of 'A' symbol - print p[2] # Value of 'B' symbol - print p[-1] # Value of whatever symbol appears before A - # on the parsing stack. - - p[0] = some_val # Sets the value of the 'foo' grammer symbol - - This behavior makes it easier to work with embedded actions within the - parsing rules. For example, in C-yacc, it is possible to write code like - this: - - bar: A { printf("seen an A = %d\n", $1); } B { do_stuff; } - - In this example, the printf() code executes immediately after A has been - parsed. Within the embedded action code, $1 refers to the A symbol on - the stack. 
-
-    To perform this equivalent action in PLY, you need to write a pair
-    of rules like this:
-
-        def p_bar(p):
-            "bar : A seen_A B"
-            do_stuff
-
-        def p_seen_A(p):
-            "seen_A :"
-            print "seen an A =", p[-1]
-
-    The second rule "seen_A" is merely an empty production which should be
-    reduced as soon as A is parsed in the "bar" rule above. The
-    negative index p[-1] is used to access whatever symbol appeared
-    before the seen_A symbol.
-
-    This feature also makes it possible to support inherited attributes.
-    For example:
-
-        def p_decl(p):
-            "decl : scope name"
-
-        def p_scope(p):
-            """scope : GLOBAL
-                     | LOCAL"""
-            p[0] = p[1]
-
-        def p_name(p):
-            "name : ID"
-            if p[-1] == "GLOBAL":
-                # ...
-            elif p[-1] == "LOCAL":
-                # ...
-
-    In this case, the name rule is inheriting an attribute from the
-    scope declaration that precedes it.
-
-    *** POTENTIAL INCOMPATIBILITY ***
-    If you are currently using negative indices within existing grammar rules,
-    your code will break. This should be extremely rare, if not non-existent,
-    in most cases. The argument to various grammar rules is not usually
-    processed in the same way as a list of items.
-
-Version 2.0
-------------------------------
-09/07/06: beazley
-    Major cleanup and refactoring of the LR table generation code. Both SLR
-    and LALR(1) table generation is now performed by the same code base with
-    only minor extensions for extra LALR(1) processing.
-
-09/07/06: beazley
-    Completely reimplemented the entire LALR(1) parsing engine to use the
-    DeRemer and Pennello algorithm for calculating lookahead sets. This
-    significantly improves the performance of generating LALR(1) tables
-    and has the added feature of actually working correctly! If you
-    experienced weird behavior with LALR(1) in prior releases, this should
-    hopefully resolve all of those problems. Many thanks to
-    Andrew Waters and Markus Schoepflin for submitting bug reports
-    and helping me test out the revised LALR(1) support.
-
-Version 1.8
-------------------------------
-08/02/06: beazley
-    Fixed a problem related to the handling of default actions in LALR(1)
-    parsing. If you experienced subtle and/or bizarre behavior when trying
-    to use the LALR(1) engine, this may correct those problems. Patch
-    contributed by Russ Cox. Note: This patch has been superseded by
-    revisions for LALR(1) parsing in Ply-2.0.
-
-08/02/06: beazley
-    Added support for slicing of productions in yacc.
-    Patch contributed by Patrick Mezard.
-
-Version 1.7
-------------------------------
-03/02/06: beazley
-    Fixed an infinite recursion problem in the ReduceToTerminals() function
-    that would sometimes come up in LALR(1) table generation. Reported by
-    Markus Schoepflin.
-
-03/01/06: beazley
-    Added "reflags" argument to lex(). For example:
-
-        lex.lex(reflags=re.UNICODE)
-
-    This can be used to specify optional flags to the re.compile() function
-    used inside the lexer. This may be necessary for special situations such
-    as processing Unicode (e.g., if you want escapes like \w and \b to consult
-    the Unicode character property database). The need for this suggested by
-    Andreas Jung.
-
-03/01/06: beazley
-    Fixed a bug with an uninitialized variable on repeated instantiations of parser
-    objects when the write_tables=0 argument was used. Reported by Michael Brown.
-
-03/01/06: beazley
-    Modified lex.py to accept Unicode strings both as the regular expressions for
-    tokens and as input. Hopefully this is the only change needed for Unicode support.
-    Patch contributed by Johan Dahl.
-
-03/01/06: beazley
-    Modified the class-based interface to work with new-style or old-style classes.
-    Patch contributed by Michael Brown (although I tweaked it slightly so it would work
-    with older versions of Python).
-
-Version 1.6
-------------------------------
-05/27/05: beazley
-    Incorporated patch contributed by Christopher Stawarz to fix an extremely
-    devious bug in LALR(1) parser generation. This patch should fix problems
-    numerous people reported with LALR parsing.
-
-05/27/05: beazley
-    Fixed problem with lex.py copy constructor. Reported by Dave Aitel, Aaron Lav,
-    and Thad Austin.
-
-05/27/05: beazley
-    Added outputdir option to yacc() to control output directory. Contributed
-    by Christopher Stawarz.
-
-05/27/05: beazley
-    Added rununit.py test script to run tests using the Python unittest module.
-    Contributed by Miki Tebeka.
-
-Version 1.5
-------------------------------
-05/26/04: beazley
-    Major enhancement. LALR(1) parsing support is now working.
-    This feature was implemented by Elias Ioup (ezioup@alumni.uchicago.edu)
-    and optimized by David Beazley. To use LALR(1) parsing do
-    the following:
-
-        yacc.yacc(method="LALR")
-
-    Computing LALR(1) parsing tables takes about twice as long as
-    the default SLR method. However, LALR(1) allows you to handle
-    more complex grammars. For example, the ANSI C grammar
-    (in example/ansic) has 13 shift-reduce conflicts with SLR, but
-    only has 1 shift-reduce conflict with LALR(1).
-
-05/20/04: beazley
-    Added a __len__ method to parser production lists. Can
-    be used in parser rules like this:
-
-        def p_somerule(p):
-            """a : B C D
-               | E F"""
-            if (len(p) == 3):
-                # Must have been first rule
-            elif (len(p) == 2):
-                # Must be second rule
-
-    Suggested by Joshua Gerth and others.
-
-Version 1.4
-------------------------------
-04/23/04: beazley
-    Incorporated a variety of patches contributed by Eric Raymond.
-    These include:
-
-    0. Cleans up some comments so they don't wrap on an 80-column display.
-    1. Directs compiler errors to stderr where they belong.
-    2. Implements and documents automatic line counting when \n is ignored.
-    3. Changes the way progress messages are dumped when debugging is on.
-       The new format is both less verbose and conveys more information than
-       the old, including shift and reduce actions.
-
-04/23/04: beazley
-    Added a Python setup.py file to simplify installation. Contributed
-    by Adam Kerrison.
-
-04/23/04: beazley
-    Added patches contributed by Adam Kerrison.
-
-    - Some output is now only shown when debugging is enabled. This
-      means that PLY will be completely silent when not in debugging mode.
-
-    - An optional parameter "write_tables" can be passed to yacc() to
-      control whether or not parsing tables are written. By default,
-      it is true, but it can be turned off if you don't want the yacc
-      table file. Note: disabling this will cause yacc() to regenerate
-      the parsing table each time.
-
-04/23/04: beazley
-    Added patches contributed by David McNab. This patch adds two
-    features:
-
-    - The parser can be supplied as a class instead of a module.
-      For an example of this, see the example/classcalc directory.
-
-    - Debugging output can be directed to a filename of the user's
-      choice. Use
-
-        yacc(debugfile="somefile.out")
-
-Version 1.3
-------------------------------
-12/10/02: jmdyck
-    Various minor adjustments to the code that Dave checked in today.
-    Updated test/yacc_{inf,unused}.exp to reflect today's changes.
-
-12/10/02: beazley
-    Incorporated a variety of minor bug fixes to empty production
-    handling and infinite recursion checking. Contributed by
-    Michael Dyck.
-
-12/10/02: beazley
-    Removed bogus recover() method call in yacc.restart()
-
-Version 1.2
-------------------------------
-11/27/02: beazley
-    Lexer and parser objects are now available as an attribute
-    of tokens and slices respectively. For example:
-
-        def t_NUMBER(t):
-            r'\d+'
-            print t.lexer
-
-        def p_expr_plus(t):
-            'expr : expr PLUS expr'
-            print t.lexer
-            print t.parser
-
-    This can be used for state management (if needed).
-
-10/31/02: beazley
-    Modified yacc.py to work with Python optimize mode. To make
-    this work, you need to use
-
-        yacc.yacc(optimize=1)
-
-    Furthermore, you need to first run Python in normal mode
-    to generate the necessary parsetab.py files. After that,
-    you can use python -O or python -OO.
-
-    Note: optimized mode turns off a lot of error checking.
-    Only use when you are sure that your grammar is working.
-    Make sure parsetab.py is up to date!
-
-10/30/02: beazley
-    Added cloning of Lexer objects. For example:
-
-        import copy
-        l = lex.lex()
-        lc = copy.copy(l)
-
-        l.input("Some text")
-        lc.input("Some other text")
-        ...
-
-    This might be useful if the same "lexer" is meant to
-    be used in different contexts---or if multiple lexers
-    are running concurrently.
-
-10/30/02: beazley
-    Fixed subtle bug with first set computation and empty productions.
-    Patch submitted by Michael Dyck.
-
-10/30/02: beazley
-    Fixed error messages to use "filename:line: message" instead
-    of "filename:line. message". This makes error reporting more
-    friendly to emacs. Patch submitted by François Pinard.
-
-10/30/02: beazley
-    Improvements to parser.out file. Terminals and nonterminals
-    are sorted instead of being printed in random order.
-    Patch submitted by François Pinard.
-
-10/30/02: beazley
-    Improvements to parser.out file output. Rules are now printed
-    in a way that's easier to understand. Contributed by Russ Cox.
-
-10/30/02: beazley
-    Added 'nonassoc' associativity support. This can be used
-    to disable the chaining of operators like a < b < c.
-    To use, simply specify 'nonassoc' in the precedence table
-
-        precedence = (
-            ('nonassoc', 'LESSTHAN', 'GREATERTHAN'),  # Nonassociative operators
-            ('left', 'PLUS', 'MINUS'),
-            ('left', 'TIMES', 'DIVIDE'),
-            ('right', 'UMINUS'),                      # Unary minus operator
-        )
-
-    Patch contributed by Russ Cox.
-
-10/30/02: beazley
-    Modified the lexer to provide optional support for Python -O and -OO
-    modes. To make this work, Python *first* needs to be run in
-    unoptimized mode. This reads the lexing information and creates a
-    file "lextab.py". Then, run lex like this:
-
-        # module foo.py
-        ...
-        ...
-        lex.lex(optimize=1)
-
-    Once the lextab file has been created, subsequent calls to
-    lex.lex() will read data from the lextab file instead of using
-    introspection. In optimized mode (-O, -OO) everything should
-    work normally despite the loss of doc strings.
-
-    To change the name of the file 'lextab.py' use the following:
-
-        lex.lex(lextab="footab")
-
-    (this creates a file footab.py)
-
-Version 1.1 October 25, 2001
-------------------------------
-
-10/25/01: beazley
-    Modified the table generator to produce much more compact data.
-    This should greatly reduce the size of the parsetab.py[c] file.
-    Caveat: the tables still need to be constructed so a little more
-    work is done in parsetab on import.
-
-10/25/01: beazley
-    There may be a possible bug in the cycle detector that reports errors
-    about infinite recursion. I'm having a little trouble tracking it
-    down, but if you get this problem, you can disable the cycle
-    detector as follows:
-
-        yacc.yacc(check_recursion = 0)
-
-10/25/01: beazley
-    Fixed a bug in lex.py that sometimes caused illegal characters to be
-    reported incorrectly. Reported by Sverre Jørgensen.
-
-7/8/01 : beazley
-    Added a reference to the underlying lexer object when tokens are handled by
-    functions. The lexer is available as the 'lexer' attribute. This
-    was added to provide better lexing support for languages such as Fortran
-    where certain types of tokens can't be conveniently expressed as regular
-    expressions (and where the tokenizing function may want to perform a
-    little backtracking). Suggested by Pearu Peterson.
-
-6/20/01 : beazley
-    Modified yacc() function so that an optional starting symbol can be specified.
-    For example:
-
-        yacc.yacc(start="statement")
-
-    Normally yacc always treats the first production rule as the starting symbol.
-    However, if you are debugging your grammar it may be useful to specify
-    an alternative starting symbol. Idea suggested by Rich Salz.
-
-Version 1.0 June 18, 2001
---------------------------
-Initial public offering
-
diff --git a/xonsh/ply/CONTRIBUTING.md b/xonsh/ply/CONTRIBUTING.md
deleted file mode 100644
index 6da57c4..0000000
--- a/xonsh/ply/CONTRIBUTING.md
+++ /dev/null
@@ -1,18 +0,0 @@
-Contributing to PLY
-===================
-
-PLY is a mature project that no longer makes releases. New features
-are no longer being added to it. However, if you feel that you have
-found a bug in PLY or its documentation, please submit an issue or a
-pull request.
-
-Important note: The Github repo for PLY always contains the most
-up-to-date version of the software. If you want to use the current
-version, you should COPY the contents of the `ply/` directory into
-your own project and use it. There will be no future package-installable
-releases of PLY.
-
-
-
-
-
diff --git a/xonsh/ply/Makefile b/xonsh/ply/Makefile
deleted file mode 100644
index b13d007..0000000
--- a/xonsh/ply/Makefile
+++ /dev/null
@@ -1,17 +0,0 @@
-PYTHON ?= python
-
-test:
-	cd test && $(PYTHON) testlex.py
-	cd test && $(PYTHON) testyacc.py
-
-wheel:
-	$(PYTHON) setup.py bdist_wheel
-
-sdist:
-	$(PYTHON) setup.py sdist
-
-upload: wheel sdist
-	$(PYTHON) setup.py bdist_wheel upload
-	$(PYTHON) setup.py sdist upload
-
-.PHONY: test wheel sdist upload
diff --git a/xonsh/ply/README.md b/xonsh/ply/README.md
deleted file mode 100644
index f640487..0000000
--- a/xonsh/ply/README.md
+++ /dev/null
@@ -1,274 +0,0 @@
-# PLY (Python Lex-Yacc)
-
-[![Build Status](https://travis-ci.org/dabeaz/ply.svg?branch=master)](https://travis-ci.org/dabeaz/ply)
-
-Copyright (C) 2001-2019
-David M. Beazley (Dabeaz LLC)
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-* Redistributions of source code must retain the above copyright notice,
-  this list of conditions and the following disclaimer.
-* Redistributions in binary form must reproduce the above copyright notice,
-  this list of conditions and the following disclaimer in the documentation
-  and/or other materials provided with the distribution.
-* Neither the name of the David Beazley or Dabeaz LLC may be used to - endorse or promote products derived from this software without - specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -Introduction -============ - -PLY is a 100% Python implementation of the common parsing tools lex -and yacc. Here are a few highlights: - - - PLY is very closely modeled after traditional lex/yacc. - If you know how to use these tools in C, you will find PLY - to be similar. - - - PLY provides *very* extensive error reporting and diagnostic - information to assist in parser construction. The original - implementation was developed for instructional purposes. As - a result, the system tries to identify the most common types - of errors made by novice users. - - - PLY provides full support for empty productions, error recovery, - precedence specifiers, and moderately ambiguous grammars. - - - Parsing is based on LR-parsing which is fast, memory efficient, - better suited to large grammars, and which has a number of nice - properties when dealing with syntax errors and other parsing problems. - Currently, PLY builds its parsing tables using the LALR(1) - algorithm used in yacc. - - - PLY uses Python introspection features to build lexers and parsers. - This greatly simplifies the task of parser construction since it reduces - the number of files and eliminates the need to run a separate lex/yacc - tool before running your program. - - - PLY can be used to build parsers for "real" programming languages. - Although it is not ultra-fast due to its Python implementation, - PLY can be used to parse grammars consisting of several hundred - rules (as might be found for a language like C). The lexer and LR - parser are also reasonably efficient when parsing typically - sized programs. People have used PLY to build parsers for - C, C++, ADA, and other real programming languages. - -How to Use -========== - -PLY consists of two files : lex.py and yacc.py. These are contained -within the 'ply' directory which may also be used as a Python package. -To use PLY, simply copy the 'ply' directory to your project and import -lex and yacc from the associated 'ply' package. For example: - - from .ply import lex - from .ply import yacc - -Alternatively, you can copy just the files lex.py and yacc.py -individually and use them as modules however you see fit. For example: - - import lex - import yacc - -PLY has no third-party dependencies. - -The file doc/ply.html contains complete documentation on how to use -the system. - -The example directory contains several different examples including a -PLY specification for ANSI C as given in K&R 2nd Ed. - -A simple example is found at the end of this document - -Requirements -============ -PLY requires the use of Python 2.6 or greater. 
However, you should
-use the latest Python release if possible. It should work on just
-about any platform. PLY has been tested with both CPython and Jython.
-It also seems to work with IronPython.
-
-Resources
-=========
-More information about PLY can be obtained on the PLY webpage at:
-
-     http://www.dabeaz.com/ply
-
-For a detailed overview of parsing theory, consult the excellent
-book "Compilers : Principles, Techniques, and Tools" by Aho, Sethi, and
-Ullman. The topics found in "Lex & Yacc" by Levine, Mason, and Brown
-may also be useful.
-
-The GitHub page for PLY can be found at:
-
-     https://github.com/dabeaz/ply
-
-An old and inactive discussion group for PLY is found at:
-
-     http://groups.google.com/group/ply-hack
-
-Acknowledgments
-===============
-A special thanks is in order for all of the students in CS326 who
-suffered through about 25 different versions of these tools :-).
-
-The CHANGES file acknowledges those who have contributed patches.
-
-Elias Ioup did the first implementation of LALR(1) parsing in PLY-1.x.
-Andrew Waters and Markus Schoepflin were instrumental in reporting bugs
-and testing a revised LALR(1) implementation for PLY-2.0.
-
-Special Note for PLY-3.0
-========================
-PLY-3.0 is the first PLY release to support Python 3. However, backwards
-compatibility with Python 2.6 is still preserved. PLY provides dual
-Python 2/3 compatibility by restricting its implementation to a common
-subset of basic language features. You should not convert PLY using
-2to3--it is not necessary and may in fact break the implementation.
-
-Example
-=======
-
-Here is a simple example showing a PLY implementation of a calculator
-with variables.
-
-    # -----------------------------------------------------------------------------
-    # calc.py
-    #
-    # A simple calculator with variables.
- # ----------------------------------------------------------------------------- - - tokens = ( - 'NAME','NUMBER', - 'PLUS','MINUS','TIMES','DIVIDE','EQUALS', - 'LPAREN','RPAREN', - ) - - # Tokens - - t_PLUS = r'\+' - t_MINUS = r'-' - t_TIMES = r'\*' - t_DIVIDE = r'/' - t_EQUALS = r'=' - t_LPAREN = r'\(' - t_RPAREN = r'\)' - t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*' - - def t_NUMBER(t): - r'\d+' - t.value = int(t.value) - return t - - # Ignored characters - t_ignore = " \t" - - def t_newline(t): - r'\n+' - t.lexer.lineno += t.value.count("\n") - - def t_error(t): - print("Illegal character '%s'" % t.value[0]) - t.lexer.skip(1) - - # Build the lexer - import ply.lex as lex - lex.lex() - - # Precedence rules for the arithmetic operators - precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - - # dictionary of names (for storing variables) - names = { } - - def p_statement_assign(p): - 'statement : NAME EQUALS expression' - names[p[1]] = p[3] - - def p_statement_expr(p): - 'statement : expression' - print(p[1]) - - def p_expression_binop(p): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if p[2] == '+' : p[0] = p[1] + p[3] - elif p[2] == '-': p[0] = p[1] - p[3] - elif p[2] == '*': p[0] = p[1] * p[3] - elif p[2] == '/': p[0] = p[1] / p[3] - - def p_expression_uminus(p): - 'expression : MINUS expression %prec UMINUS' - p[0] = -p[2] - - def p_expression_group(p): - 'expression : LPAREN expression RPAREN' - p[0] = p[2] - - def p_expression_number(p): - 'expression : NUMBER' - p[0] = p[1] - - def p_expression_name(p): - 'expression : NAME' - try: - p[0] = names[p[1]] - except LookupError: - print("Undefined name '%s'" % p[1]) - p[0] = 0 - - def p_error(p): - print("Syntax error at '%s'" % p.value) - - import ply.yacc as yacc - yacc.yacc() - - while True: - try: - s = raw_input('calc > ') # use input() on Python 3 - except EOFError: - break - yacc.parse(s) - - -Bug Reports and Patches -======================= -My goal with PLY is to simply have a decent lex/yacc implementation -for Python. As a general rule, I don't spend huge amounts of time -working on it unless I receive very specific bug reports and/or -patches to fix problems. At this time, PLY is mature software and new -features are no longer being added. If you think you have found a -bug, please visit the PLY Github page at https://github.com/dabeaz/ply -to report an issue. - --- Dave - - - - - - - - - diff --git a/xonsh/ply/__init__.py b/xonsh/ply/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/xonsh/ply/doc/internal.html b/xonsh/ply/doc/internal.html deleted file mode 100644 index 57e87df..0000000 --- a/xonsh/ply/doc/internal.html +++ /dev/null @@ -1,874 +0,0 @@ - - -PLY Internals - - - -

PLY Internals

- - -David M. Beazley
-dave@dabeaz.com
-
- -

-PLY Version: 3.11 -

- - -

- - - -

1. Introduction

- - -This document describes classes and functions that make up the internal -operation of PLY. Using this programming interface, it is possible to -manually build a parser using a different interface specification -than what PLY normally uses. For example, you could build a grammar -from information parsed in a completely different input format. Some of -these objects may be useful for building more advanced parsing engines -such as GLR. - -

-It should be stressed that using PLY at this level is not for the -faint of heart. Generally, it's assumed that you know a bit of -the underlying compiler theory and how an LR parser is put together. - -

2. Grammar Class

- - -The file ply.yacc defines a class Grammar that -is used to hold and manipulate information about a grammar -specification. It encapsulates the same basic information -about a grammar that is put into a YACC file including -the list of tokens, precedence rules, and grammar rules. -Various operations are provided to perform different validations -on the grammar. In addition, there are operations to compute -the first and follow sets that are needed by the various table -generation algorithms. - -

-Grammar(terminals) - -

-Creates a new grammar object. terminals is a list of strings -specifying the terminals for the grammar. An instance g of -Grammar has the following methods: -
- -

-g.set_precedence(term,assoc,level) -

-Sets the precedence level and associativity for a given terminal term. -assoc is one of 'right', -'left', or 'nonassoc' and level is a positive integer. The higher -the value of level, the higher the precedence. Here is an example of typical -precedence settings: - -
-g.set_precedence('PLUS',  'left',1)
-g.set_precedence('MINUS', 'left',1)
-g.set_precedence('TIMES', 'left',2)
-g.set_precedence('DIVIDE','left',2)
-g.set_precedence('UMINUS','left',3)
-
- -This method must be called prior to adding any productions to the -grammar with g.add_production(). The precedence of individual grammar -rules is determined by the precedence of the right-most terminal. - -
-

-g.add_production(name,syms,func=None,file='',line=0) -

-Adds a new grammar rule. name is the name of the rule, -syms is a list of symbols making up the right hand -side of the rule, func is the function to call when -reducing the rule. file and line specify -the filename and line number of the rule and are used for -generating error messages. - -

-The list of symbols in syms may include character -literals and %prec specifiers. Here are some -examples: - -

-g.add_production('expr',['expr','PLUS','term'],func,file,line)
-g.add_production('expr',['expr','"+"','term'],func,file,line)
-g.add_production('expr',['MINUS','expr','%prec','UMINUS'],func,file,line)
-
- -

-If any kind of error is detected, a GrammarError exception -is raised with a message indicating the reason for the failure. -

- -

-g.set_start(start=None) -

-Sets the starting rule for the grammar. start is a string -specifying the name of the start rule. If start is omitted, -the first grammar rule added with add_production() is taken to be -the starting rule. This method must always be called after all -productions have been added. -
- -

-g.find_unreachable() -

-Diagnostic function. Returns a list of all unreachable non-terminals -defined in the grammar. This is used to identify inactive parts of -the grammar specification. -
- -

-g.infinite_cycle() -

-Diagnostic function. Returns a list of all non-terminals in the -grammar that result in an infinite cycle. This condition occurs if -there is no way for a grammar rule to expand to a string containing -only terminal symbols. -
- -

-g.undefined_symbols() -

-Diagnostic function. Returns a list of tuples (name, prod) -corresponding to undefined symbols in the grammar. name is the -name of the undefined symbol and prod is an instance of -Production which has information about the production rule -where the undefined symbol was used. -
- -

-g.unused_terminals() -

-Diagnostic function. Returns a list of terminals that were defined, -but never used in the grammar. -
- -

-g.unused_rules() -

-Diagnostic function. Returns a list of Production instances -corresponding to production rules that were defined in the grammar, -but never used anywhere. This is slightly different -than find_unreachable(). -
- -

-g.unused_precedence() -

-Diagnostic function. Returns a list of tuples (term, assoc) -corresponding to precedence rules that were set, but never used in the -grammar. term is the terminal name and assoc is the -precedence associativity (e.g., 'left', 'right', -or 'nonassoc'). -
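Taken together, these diagnostics are easy to run once a grammar has been populated. A minimal sketch (assuming g is a fully populated Grammar instance):

    # report tokens that are declared but never appear in any rule
    for term in g.unused_terminals():
        print("Warning: unused token %s" % term)

    # report symbols used in rules but never defined
    for sym, prod in g.undefined_symbols():
        print("Error: %s undefined (first used at %s:%d)" % (sym, prod.file, prod.lineno))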
- -

-g.compute_first() -

-Compute all of the first sets for all symbols in the grammar. Returns a dictionary -mapping symbol names to a list of all first symbols. -
- -

-g.compute_follow() -

-Compute all of the follow sets for all non-terminals in the grammar. -The follow set is the set of all possible symbols that might follow a -given non-terminal. Returns a dictionary mapping non-terminal names -to a list of symbols. -
- -

-g.build_lritems() -

-Calculates all of the LR items for all productions in the grammar. This -step is required before using the grammar for any kind of table generation. -See the section on LR items below. -
- -

-The following attributes are set by the above methods and may be useful -in code that works with the grammar. All of these attributes should be -assumed to be read-only. Changing their values directly will likely -break the grammar. - -

-g.Productions -

-A list of all productions added. The first entry is reserved for -a production representing the starting rule. The objects in this list -are instances of the Production class, described shortly. -
- -

-g.Prodnames -

-A dictionary mapping the names of nonterminals to a list of all -productions of that nonterminal. -
- -

-g.Terminals -

-A dictionary mapping the names of terminals to a list of the -production numbers where they are used. -
- -

-g.Nonterminals -

-A dictionary mapping the names of nonterminals to a list of the -production numbers where they are used. -
- -

-g.First -

-A dictionary representing the first sets for all grammar symbols. This is -computed and returned by the compute_first() method. -
- -

-g.Follow -

-A dictionary representing the follow sets for all grammar rules. This is -computed and returned by the compute_follow() method. -
- -

-g.Start -

-Starting symbol for the grammar. Set by the set_start() method. -
- -For the purposes of debugging, a Grammar object supports the __len__() and -__getitem__() special methods. Accessing g[n] returns the nth production -from the grammar. - - -

3. Productions

- - -Grammar objects store grammar rules as instances of a Production class. This -class has no public constructor--you should only create productions by calling Grammar.add_production(). -The following attributes are available on a Production instance p. - -

-p.name -

-The name of the production. For a grammar rule such as A : B C D, this is 'A'. -
- -

-p.prod -

-A tuple of symbols making up the right-hand side of the production. For a grammar rule such as A : B C D, this is ('B','C','D'). -
- -

-p.number -

-Production number. An integer containing the index of the production in the grammar's Productions list. -
- -

-p.func -

-The name of the reduction function associated with the production. -This is the function that will execute when reducing the entire -grammar rule during parsing. -
- -

-p.callable -

-The callable object associated with the name in p.func. This is None -unless the production has been bound using bind(). -
- -

-p.file -

-Filename associated with the production. Typically this is the file where the production was defined. Used for error messages. -
- -

-p.lineno -

-Line number associated with the production. Typically this is the line number in p.file where the production was defined. Used for error messages. -
- -

-p.prec -

-Precedence and associativity associated with the production. This is a tuple (assoc,level) where -assoc is one of 'left','right', or 'nonassoc' and level is -an integer. This value is determined by the precedence of the right-most terminal symbol in the production -or by use of the %prec specifier when adding the production. -
- -

-p.usyms -

-A list of all unique symbols found in the production. -
- -

-p.lr_items -

-A list of all LR items for this production. This attribute only has a meaningful value if the -Grammar.build_lritems() method has been called. The items in this list are -instances of LRItem described below. -
- -

-p.lr_next -

-The head of a linked-list representation of the LR items in p.lr_items. -This attribute only has a meaningful value if the Grammar.build_lritems() -method has been called. Each LRItem instance has a lr_next attribute -to move to the next item. The list is terminated by None. -
- -

-p.bind(dict) -

-Binds the production function name in p.func to a callable object in -dict. This operation is typically carried out in the last step -prior to running the parsing engine and is needed since parsing tables are typically -read from files which only include the function names, not the functions themselves. -
- -

-Production objects support -the __len__(), __getitem__(), and __str__() -special methods. -len(p) returns the number of symbols in p.prod -and p[n] is the same as p.prod[n]. - -

4. LRItems

- - -The construction of parsing tables in an LR-based parser generator is primarily -done over a set of "LR Items". An LR item represents a stage of parsing one -of the grammar rules. To compute the LR items, it is first necessary to -call Grammar.build_lritems(). Once this step is complete, all of the productions -in the grammar will have their LR items attached to them. - -

-Here is an interactive example that shows what LR items look like if you -interactively experiment. In this example, g is a Grammar -object. - -

-
->>> g.build_lritems()
->>> p = g[1]
->>> p
-Production(statement -> ID = expr)
->>>
-
-
- -In the above code, p represents the first grammar rule. In -this case, a rule 'statement -> ID = expr'. - -

-Now, let's look at the LR items for p. - -

-
->>> p.lr_items
-[LRItem(statement -> . ID = expr), 
- LRItem(statement -> ID . = expr), 
- LRItem(statement -> ID = . expr), 
- LRItem(statement -> ID = expr .)]
->>>
-
-
- -In each LR item, the dot (.) represents a specific stage of parsing. From one LR item to the next, the dot -is advanced by one symbol. It is only when the dot reaches the very end that a production -is successfully parsed. - -

-An instance lr of LRItem has the following -attributes that hold information related to that specific stage of -parsing. - -

-lr.name -

-The name of the grammar rule. For example, 'statement' in the above example. -
- -

-lr.prod -

-A tuple of symbols representing the right-hand side of the production, including the -special '.' character. For example, ('ID','.','=','expr'). -
- -

-lr.number -

-An integer representing the production number in the grammar. -
- -

-lr.usyms -

-A set of unique symbols in the production. Inherited from the original Production instance. -
- -

-lr.lr_index -

-An integer representing the position of the dot (.). You should never use lr.prod.index() -to search for it--the result will be wrong if the grammar happens to also use (.) as a character -literal. -
- -

-lr.lr_after -

-A list of all productions that can legally appear immediately to the right of the -dot (.). This list contains Production instances. This attribute -represents all of the possible branches a parse can take from the current position. -For example, suppose that lr represents a stage immediately before -an expression like this: - -
->>> lr
-LRItem(statement -> ID = . expr)
->>>
-
- -Then, the value of lr.lr_after might look like this, showing all productions that -can legally appear next: - -
->>> lr.lr_after
-[Production(expr -> expr PLUS expr), 
- Production(expr -> expr MINUS expr), 
- Production(expr -> expr TIMES expr), 
- Production(expr -> expr DIVIDE expr), 
- Production(expr -> MINUS expr), 
- Production(expr -> LPAREN expr RPAREN), 
- Production(expr -> NUMBER), 
- Production(expr -> ID)]
->>>
-
- -
- -

-lr.lr_before -

-The grammar symbol that appears immediately before the dot (.) or None if -at the beginning of the parse. -
- -

-lr.lr_next -

-A link to the next LR item, representing the next stage of the parse. None if lr -is the last LR item. -
- -LRItem instances also support the __len__() and __getitem__() special methods. -len(lr) returns the number of items in lr.prod including the dot (.). lr[n] -returns lr.prod[n]. - -

-It goes without saying that all of the attributes associated with LR -items should be assumed to be read-only. Modifications will very -likely create a small black-hole that will consume you and your code. - -

5. LRTable

- - -The LRTable class is used to represent LR parsing table data. This -minimally includes the production list, action table, and goto table. - -

-LRTable() -

-Create an empty LRTable object. This object contains only the information needed to -run an LR parser. -
- -An instance lrtab of LRTable has the following methods: - -

-lrtab.read_table(module) -

-Populates the LR table with information from the module specified in module. -module is either a module object already loaded with import or -the name of a Python module. If it's a string containing a module name, it is -loaded and parsing data is extracted. Returns the signature value that was used -when initially writing the tables. Raises a VersionError exception if -the module was created using an incompatible version of PLY. -
- -

-lrtab.bind_callables(dict) -

-This binds all of the function names used in productions to callable objects -found in the dictionary dict. During table generation and when reading -LR tables from files, PLY only uses the names of action functions such as 'p_expr', -'p_statement', etc. In order to actually run the parser, these names -have to be bound to callable objects. This method is always called prior to -running a parser. -
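Loading previously written tables therefore looks roughly like this. A minimal sketch (the module name "parsetab" and p_* functions living in the calling namespace are assumptions):

    from ply.yacc import LRTable

    lrtab = LRTable()
    signature = lrtab.read_table("parsetab")   # returns the stored grammar signature
    lrtab.bind_callables(globals())            # rebind names like 'p_expr' to real functions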
- -After lrtab has been populated, the following attributes are defined. - -

-lrtab.lr_method -

-The LR parsing method used (e.g., 'LALR') -
- - -

-lrtab.lr_productions -

-The production list. If the parsing tables have been newly -constructed, this will be a list of Production instances. If -the parsing tables have been read from a file, it's a list -of MiniProduction instances. This, together -with lr_action and lr_goto, contains all of the -information needed by the LR parsing engine. -
- -

-lrtab.lr_action -

-The LR action dictionary that implements the underlying state machine. -The keys of this dictionary are the LR states. -
- -

-lrtab.lr_goto -

-The LR goto table that contains information about grammar rule reductions. -
- - -

6. LRGeneratedTable

- - -The LRGeneratedTable class represents constructed LR parsing tables on a -grammar. It is a subclass of LRTable. - -

-LRGeneratedTable(grammar, method='LALR',log=None) -

-Create the LR parsing tables on a grammar. grammar is an instance of Grammar, -method is a string with the parsing method ('SLR' or 'LALR'), and -log is a logger object used to write debugging information. The debugging information -written to log is the same as what appears in the parser.out file created -by yacc. By supplying a custom logger with a different message format, it is possible to get -more information (e.g., the line number in yacc.py used for issuing each line of -output in the log). The result is an instance of LRGeneratedTable. -
- -

-An instance lr of LRGeneratedTable has the following attributes. - -

-lr.grammar -

-A link to the Grammar object used to construct the parsing tables. -
- -

-lr.lr_method -

-The LR parsing method used (e.g., 'LALR') -
- - -

-lr.lr_productions -

-A reference to grammar.Productions. This, together with lr_action and lr_goto, -contains all of the information needed by the LR parsing engine. -
- -

-lr.lr_action -

-The LR action dictionary that implements the underlying state machine. The keys of this dictionary are -the LR states. -
- -

-lr.lr_goto -

-The LR goto table that contains information about grammar rule reductions. -
- -

-lr.sr_conflicts -

-A list of tuples (state,token,resolution) identifying all shift/reduce conflicts. state is the LR state -number where the conflict occurred, token is the token causing the conflict, and resolution is -a string describing the resolution taken. resolution is either 'shift' or 'reduce'. -
- -

-lr.rr_conflicts -

-A list of tuples (state,rule,rejected) identifying all reduce/reduce conflicts. state is the -LR state number where the conflict occurred, rule is the production rule that was selected -and rejected is the production rule that was rejected. Both rule and rejected are -instances of Production. They can be inspected to provide the user with more information. -
- -

-There are two public methods of LRGeneratedTable. - -

-lr.write_table(modulename,outputdir="",signature="") -

-Writes the LR parsing table information to a Python module. modulename is a string -specifying the name of a module such as "parsetab". outputdir is the name of a -directory where the module should be created. signature is a string representing a -grammar signature that's written into the output file. This can be used to detect when -the data stored in a module file is out-of-sync with the grammar specification (and that -the tables need to be regenerated). If modulename is a string "parsetab", -this function creates a file called parsetab.py. If the module name represents a -package such as "foo.bar.parsetab", then only the last component, "parsetab", is -used. -
- - -

7. LRParser

- - -The LRParser class implements the low-level LR parsing engine. - - -

-LRParser(lrtab, error_func) -

-Create an LRParser. lrtab is an instance of LRTable -containing the LR production and state tables. error_func is the -error function to invoke in the event of a parsing error. -
- -An instance p of LRParser has the following methods: - -

-p.parse(input=None,lexer=None,debug=0,tracking=0,tokenfunc=None) -

-Run the parser. input is a string, which if supplied is fed into the -lexer using its input() method. lexer is an instance of the -Lexer class to use for tokenizing. If not supplied, the last lexer -created with the lex module is used. debug is a boolean flag -that enables debugging. tracking is a boolean flag that tells the -parser to perform additional line number tracking. tokenfunc is a callable -function that returns the next token. If supplied, the parser will use it to get -all tokens. -
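For example, a hand-assembled parser might be driven like this. A minimal sketch (lrtab, a p_error function, and a lexer built with ply.lex are assumed to already exist):

    from ply.yacc import LRParser

    parser = LRParser(lrtab, p_error)
    result = parser.parse("x = 3 + 42 * (s - t)", lexer=lexer)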
- -

-p.restart() -

-Resets the parser state for a parse already in progress. -
- -

8. ParserReflect

- - -

-The ParserReflect class is used to collect parser specification data -from a Python module or object. This class is what collects all of the -p_rule() functions in a PLY file, performs basic error checking, -and collects all of the needed information to build a grammar. Most of the -high-level PLY interface as used by the yacc() function is actually -implemented by this class. - -

-ParserReflect(pdict, log=None) -

-Creates a ParserReflect instance. pdict is a dictionary -containing parser specification data. This dictionary typically corresponds -to the module or class dictionary of code that implements a PLY parser. -log is a logger instance that will be used to report error -messages. -
- -An instance p of ParserReflect has the following methods: - -

-p.get_all() -

-Collect and store all required parsing information. -
- -

-p.validate_all() -

-Validate all of the collected parsing information. This is a separate step -from p.get_all() as a performance optimization. In order to -reduce parser start-up time, a parser can elect to only validate the -parsing data when regenerating the parsing tables. The validation -step tries to collect as much information as possible rather than -raising an exception at the first sign of trouble. The attribute -p.error is set if there are any validation errors. The -value of this attribute is also returned. -
- -

-p.signature() -

-Compute a signature representing the contents of the collected parsing -data. The signature value should change if anything in the parser -specification has changed in a way that would justify parser table -regeneration. This method can be called after p.get_all(), -but before p.validate_all(). -
- -The following attributes are set in the process of collecting data: - -

-p.start -

-The grammar start symbol, if any. Taken from pdict['start']. -
- -

-p.error_func -

-The error handling function or None. Taken from pdict['p_error']. -
- -

-p.tokens -

-The token list. Taken from pdict['tokens']. -
- -

-p.prec -

-The precedence specifier. Taken from pdict['precedence']. -
- -

-p.preclist -

-A parsed version of the precedence specifier. A list of tuples of the form -(token,assoc,level) where token is the terminal symbol, -assoc is the associativity (e.g., 'left') and level -is a numeric precedence level. -
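For instance, the precedence table used by the calculator examples elsewhere in this documentation would come out as something like:

    [('PLUS', 'left', 1), ('MINUS', 'left', 1),
     ('TIMES', 'left', 2), ('DIVIDE', 'left', 2),
     ('UMINUS', 'right', 3)]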
- -

-p.grammar -

-A list of tuples (name, rules) representing the grammar rules. name is the -name of a Python function or method in pdict that starts with "p_". -rules is a list of tuples (filename,line,prodname,syms) representing -the grammar rules found in the documentation string of that function. filename and line contain location -information that can be used for debugging. prodname is the name of the -production. syms is the right-hand side of the production. If you have a -function like this - -
-def p_expr(p):
-    '''expr : expr PLUS expr
-            | expr MINUS expr
-            | expr TIMES expr
-            | expr DIVIDE expr'''
-
- -then the corresponding entry in p.grammar might look like this: - -
-('p_expr', [ ('calc.py',10,'expr', ['expr','PLUS','expr']),
-             ('calc.py',11,'expr', ['expr','MINUS','expr']),
-             ('calc.py',12,'expr', ['expr','TIMES','expr']),
-             ('calc.py',13,'expr', ['expr','DIVIDE','expr'])
-           ])
-
-
- -

-p.pfuncs -

-A sorted list of tuples (line, file, name, doc) representing all of -the p_ functions found. line and file give location -information. name is the name of the function. doc is the -documentation string. This list is sorted in ascending order by line number. -
- -

-p.files -

-A dictionary holding all of the source filenames that were encountered -while collecting parser information. Only the keys of this dictionary have -any meaning. -
- -

-p.error -

-An attribute that indicates whether or not any critical errors -occurred in validation. If this is set, it means that some kind -of problem was detected and that no further processing should be -performed. -
- - -

9. High-level operation

- - -Using all of the above classes requires some attention to detail. The yacc() -function carries out a very specific sequence of operations to create a grammar. -This same sequence should be emulated if you build an alternative PLY interface. - -
    -
  1. A ParserReflect object is created and raw grammar specification data is -collected. -
  2. A Grammar object is created and populated with information -from the specification data. -
3. An LRGeneratedTable object is created to run the LALR algorithm over -the Grammar object. -
4. Productions in the LRGeneratedTable are bound to callables using the bind_callables() -method. -
5. An LRParser object is created from the information in the -LRGeneratedTable object, as sketched below. -
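Concretely, that sequence corresponds to code along the following lines. This is only a sketch assembled from the interfaces documented above; the parser module name calcparse is an assumption:

    import ply.yacc as yacc
    import calcparse                 # module defining tokens, precedence, p_* rules

    # 1. Collect and check the raw specification data
    pinfo = yacc.ParserReflect(vars(calcparse))
    pinfo.get_all()
    pinfo.validate_all()

    # 2. Build and populate the Grammar
    g = yacc.Grammar(pinfo.tokens)
    for term, assoc, level in pinfo.preclist:
        g.set_precedence(term, assoc, level)
    for funcname, rules in pinfo.grammar:
        for filename, line, prodname, syms in rules:
            g.add_production(prodname, syms, funcname, filename, line)
    g.set_start(pinfo.start)

    # 3. Generate the LALR(1) tables and 4. bind the p_* functions
    lrtab = yacc.LRGeneratedTable(g, method='LALR')
    lrtab.bind_callables(vars(calcparse))

    # 5. Create the parsing engine
    parser = yacc.LRParser(lrtab, pinfo.error_func)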
-
-
-
-
-
-
-
-
-
-
diff --git a/xonsh/ply/doc/makedoc.py b/xonsh/ply/doc/makedoc.py
deleted file mode 100644
index e5cbdb0..0000000
--- a/xonsh/ply/doc/makedoc.py
+++ /dev/null
@@ -1,194 +0,0 @@
-#!/usr/local/bin/python
-
-###############################################################################
-# Takes a chapter as input and adds internal links and numbering to all
-# of the H1, H2, H3, H4 and H5 sections.
-#
-# Every heading HTML tag (H1, H2 etc) is given an autogenerated name to link
-# to. However, if the name is not an autogenerated name from a previous run,
-# it will be kept. If it is autogenerated, it might change on subsequent runs
-# of this program. Thus if you want to create links to one of the headings,
-# then change the heading link name to something that does not look like an
-# autogenerated link name.
-###############################################################################
-
-import sys
-import re
-import string
-
-###############################################################################
-# Functions
-###############################################################################
-
-# Regexs for <a name="..."></a>
-alink = re.compile(r"<a *name *= *\"(.*)\"></a>", re.IGNORECASE)
-heading = re.compile(r"(_nn\d)", re.IGNORECASE)
-
-def getheadingname(m):
-    autogeneratedheading = True
-    if m.group(1) != None:
-        amatch = alink.match(m.group(1))
-        if amatch:
-            # A non-autogenerated heading - keep it
-            headingname = amatch.group(1)
-            autogeneratedheading = heading.match(headingname)
-    if autogeneratedheading:
-        # The heading name was either non-existent or autogenerated,
-        # We can create a new heading / change the existing heading
-        headingname = "%s_nn%d" % (filenamebase, nameindex)
-    return headingname
-
-###############################################################################
-# Main program
-###############################################################################
-
-if len(sys.argv) != 2:
-    print "usage: makedoc.py filename"
-    sys.exit(1)
-
-filename = sys.argv[1]
-filenamebase = string.split(filename,".")[0]
-
-section = 0
-subsection = 0
-subsubsection = 0
-subsubsubsection = 0
-nameindex = 0
-
-name = ""
-
-# Regexs for <H1>,... <H5> sections

-
-h1 = re.compile(r".*?<H1>(<a.*a>)*[\d\.\s]*(.*?)</H1>", re.IGNORECASE)
-h2 = re.compile(r".*?<H2>(<a.*a>)*[\d\.\s]*(.*?)</H2>", re.IGNORECASE)
-h3 = re.compile(r".*?<H3>(<a.*a>)*[\d\.\s]*(.*?)</H3>", re.IGNORECASE)
-h4 = re.compile(r".*?<H4>(<a.*a>)*[\d\.\s]*(.*?)</H4>", re.IGNORECASE)
-h5 = re.compile(r".*?<H5>(<a.*a>)*[\d\.\s]*(.*?)</H5>", re.IGNORECASE)
-
-# Make backup and keep the original text in data
-with open(filename) as src, open(filename+".bak","w") as dst:
-    data = src.read()
-    dst.write(data)
-
-lines = data.splitlines()
-result = [ ]     # This is the result of postprocessing the file
-index = "\n<!-- INDEX -->\n<div class=\"sectiontoc\">\n"   # index contains the index for adding at the top of the file. Also printed to stdout.
-
-skip = 0
-skipspace = 0
-
-for s in lines:
-    if s == "<!-- INDEX -->":
-        if not skip:
-            result.append("@INDEX@")
-            skip = 1
-        else:
-            skip = 0
-        continue
-    if skip:
-        continue
-
-    if not s and skipspace:
-        continue
-
-    if skipspace:
-        result.append("")
-        result.append("")
-        skipspace = 0
-
-    m = h2.match(s)
-    if m:
-        prevheadingtext = m.group(2)
-        nameindex += 1
-        section += 1
-        headingname = getheadingname(m)
-        result.append("""<H2><a name="%s"></a>%d. %s</H2>""" % (headingname,section, prevheadingtext))
-
-        if subsubsubsection:
-            index += "</ul>\n"
-        if subsubsection:
-            index += "</ul>\n"
-        if subsection:
-            index += "</ul>\n"
-        if section == 1:
-            index += "<ul>\n"
-
-        index += """<li><a href="#%s">%s</a>\n""" % (headingname,prevheadingtext)
-        subsection = 0
-        subsubsection = 0
-        subsubsubsection = 0
-        skipspace = 1
-        continue
-    m = h3.match(s)
-    if m:
-        prevheadingtext = m.group(2)
-        nameindex += 1
-        subsection += 1
-        headingname = getheadingname(m)
-        result.append("""<H3><a name="%s"></a>%d.%d %s</H3>""" % (headingname,section, subsection, prevheadingtext))
-
-        if subsubsubsection:
-            index += "</ul>\n"
-        if subsubsection:
-            index += "</ul>\n"
-        if subsection == 1:
-            index += "<ul>\n"
-
-        index += """<li><a href="#%s">%s</a>\n""" % (headingname,prevheadingtext)
-        subsubsection = 0
-        skipspace = 1
-        continue
-    m = h4.match(s)
-    if m:
-        prevheadingtext = m.group(2)
-        nameindex += 1
-        subsubsection += 1
-        subsubsubsection = 0
-        headingname = getheadingname(m)
-        result.append("""<H4><a name="%s"></a>%d.%d.%d %s</H4>""" % (headingname,section, subsection, subsubsection, prevheadingtext))
-
-        if subsubsubsection:
-            index += "</ul>\n"
-        if subsubsection == 1:
-            index += "<ul>\n"
-
-        index += """<li><a href="#%s">%s</a>\n""" % (headingname,prevheadingtext)
-        skipspace = 1
-        continue
-    m = h5.match(s)
-    if m:
-        prevheadingtext = m.group(2)
-        nameindex += 1
-        subsubsubsection += 1
-        headingname = getheadingname(m)
-        result.append("""<H5><a name="%s"></a>%d.%d.%d.%d %s</H5>""" % (headingname,section, subsection, subsubsection, subsubsubsection, prevheadingtext))
-
-        if subsubsubsection == 1:
-            index += "<ul>\n"
-
-        index += """<li><a href="#%s">%s</a>\n""" % (headingname,prevheadingtext)
-        skipspace = 1
-        continue
-
-    result.append(s)
-
-if subsubsubsection:
-    index += "</ul>\n"
-
-if subsubsection:
-    index += "</ul>\n"
-
-if subsection:
-    index += "</ul>\n"
-
-if section:
-    index += "</ul>\n"
-
-index += "</div>\n<!-- INDEX -->\n"
-
-data = "\n".join(result)
-
-data = data.replace("@INDEX@",index) + "\n"
-
-# Write the file back out
-with open(filename,"w") as f:
-    f.write(data)
diff --git a/xonsh/ply/doc/ply.html b/xonsh/ply/doc/ply.html
deleted file mode 100644
index 6b8aca9..0000000
--- a/xonsh/ply/doc/ply.html
+++ /dev/null
@@ -1,3496 +0,0 @@ - - -PLY (Python Lex-Yacc) - - - -

PLY (Python Lex-Yacc)

- - -David M. Beazley
-dave@dabeaz.com
-
- -

-PLY Version: 3.11 -

- - -

- - - - - - - -

1. Preface and Requirements

- - -

-This document provides an overview of lexing and parsing with PLY. -Given the intrinsic complexity of parsing, I would strongly advise -that you read (or at least skim) this entire document before jumping -into a big development project with PLY. -

- -

-PLY-3.5 is compatible with both Python 2 and Python 3. If you are using -Python 2, you have to use Python 2.6 or newer. -

- -

2. Introduction

- - -PLY is a pure-Python implementation of the popular compiler -construction tools lex and yacc. The main goal of PLY is to stay -fairly faithful to the way in which traditional lex/yacc tools work. -This includes supporting LALR(1) parsing as well as providing -extensive input validation, error reporting, and diagnostics. Thus, -if you've used yacc in another programming language, it should be -relatively straightforward to use PLY. - -

-Early versions of PLY were developed to support an Introduction to -Compilers Course I taught in 2001 at the University of Chicago. -Since PLY was primarily developed as an instructional tool, you will -find it to be fairly picky about token and grammar rule -specification. In part, this -added formality is meant to catch common programming mistakes made by -novice users. However, advanced users will also find such features to -be useful when building complicated grammars for real programming -languages. It should also be noted that PLY does not provide much in -the way of bells and whistles (e.g., automatic construction of -abstract syntax trees, tree traversal, etc.). Nor would I consider it -to be a parsing framework. Instead, you will find a bare-bones, yet -fully capable lex/yacc implementation written entirely in Python. - -

-The rest of this document assumes that you are somewhat familiar with -parsing theory, syntax directed translation, and the use of compiler -construction tools such as lex and yacc in other programming -languages. If you are unfamiliar with these topics, you will probably -want to consult an introductory text such as "Compilers: Principles, -Techniques, and Tools", by Aho, Sethi, and Ullman. O'Reilly's "Lex -and Yacc" by John Levine may also be handy. In fact, the O'Reilly book can be -used as a reference for PLY as the concepts are virtually identical. - -

3. PLY Overview

- - -

-PLY consists of two separate modules: lex.py and -yacc.py, both of which are found in a Python package -called ply. The lex.py module is used to break input text into a -collection of tokens specified by a collection of regular expression -rules. yacc.py is used to recognize language syntax that has -been specified in the form of a context-free grammar. -

- -

-The two tools are meant to work together. Specifically, -lex.py provides an external interface in the form of a -token() function that returns the next valid token on the -input stream. yacc.py calls this repeatedly to retrieve -tokens and invoke grammar rules. The output of yacc.py is -often an Abstract Syntax Tree (AST). However, this is entirely up to -the user. If desired, yacc.py can also be used to implement -simple one-pass compilers. - -

-Like its Unix counterpart, yacc.py provides most of the -features you expect including extensive error checking, grammar -validation, support for empty productions, error tokens, and ambiguity -resolution via precedence rules. In fact, almost everything that is possible in traditional yacc -should be supported in PLY. - -

-The primary difference between -yacc.py and Unix yacc is that yacc.py -doesn't involve a separate code-generation process. -Instead, PLY relies on reflection (introspection) -to build its lexers and parsers. Unlike traditional lex/yacc which -require a special input file that is converted into a separate source -file, the specifications given to PLY are valid Python -programs. This means that there are no extra source files nor is -there a special compiler construction step (e.g., running yacc to -generate Python code for the compiler). Since the generation of the -parsing tables is relatively expensive, PLY caches the results and -saves them to a file. If no changes are detected in the input source, -the tables are read from the cache. Otherwise, they are regenerated. - -
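In practice that workflow is only a few lines. A minimal sketch (assuming the calling module defines its t_* and p_* rules as shown in the sections that follow):

    import ply.lex as lex
    import ply.yacc as yacc

    lexer = lex.lex()      # build the lexer from the t_* rules by introspection
    parser = yacc.yacc()   # build the parser, writing or reusing the cached tables
    result = parser.parse("x = 3 + 42 * (s - t)", lexer=lexer)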

4. Lex

- - -lex.py is used to tokenize an input string. For example, suppose -you're writing a programming language and a user supplied the following input string: - -
-
-x = 3 + 42 * (s - t)
-
-
- -A tokenizer splits the string into individual tokens - -
-
-'x','=', '3', '+', '42', '*', '(', 's', '-', 't', ')'
-
-
- -Tokens are usually given names to indicate what they are. For example: - -
-
-'ID','EQUALS','NUMBER','PLUS','NUMBER','TIMES',
-'LPAREN','ID','MINUS','ID','RPAREN'
-
-
- -More specifically, the input is broken into pairs of token types and values. For example: - -
-
-('ID','x'), ('EQUALS','='), ('NUMBER','3'), 
-('PLUS','+'), ('NUMBER','42'), ('TIMES','*'),
-('LPAREN','('), ('ID','s'), ('MINUS','-'),
-('ID','t'), ('RPAREN',')'
-
-
- -The identification of tokens is typically done by writing a series of regular expression -rules. The next section shows how this is done using lex.py. - -

4.1 Lex Example

- - -The following example shows how lex.py is used to write a simple tokenizer. - -
-
-# ------------------------------------------------------------
-# calclex.py
-#
-# tokenizer for a simple expression evaluator for
-# numbers and +,-,*,/
-# ------------------------------------------------------------
-import ply.lex as lex
-
-# List of token names.   This is always required
-tokens = (
-   'NUMBER',
-   'PLUS',
-   'MINUS',
-   'TIMES',
-   'DIVIDE',
-   'LPAREN',
-   'RPAREN',
-)
-
-# Regular expression rules for simple tokens
-t_PLUS    = r'\+'
-t_MINUS   = r'-'
-t_TIMES   = r'\*'
-t_DIVIDE  = r'/'
-t_LPAREN  = r'\('
-t_RPAREN  = r'\)'
-
-# A regular expression rule with some action code
-def t_NUMBER(t):
-    r'\d+'
-    t.value = int(t.value)    
-    return t
-
-# Define a rule so we can track line numbers
-def t_newline(t):
-    r'\n+'
-    t.lexer.lineno += len(t.value)
-
-# A string containing ignored characters (spaces and tabs)
-t_ignore  = ' \t'
-
-# Error handling rule
-def t_error(t):
-    print("Illegal character '%s'" % t.value[0])
-    t.lexer.skip(1)
-
-# Build the lexer
-lexer = lex.lex()
-
-
-
-To use the lexer, you first need to feed it some input text using -its input() method. After that, repeated calls -to token() produce tokens. The following code shows how this -works: - -
-
-
-# Test it out
-data = '''
-3 + 4 * 10
-  + -20 *2
-'''
-
-# Give the lexer some input
-lexer.input(data)
-
-# Tokenize
-while True:
-    tok = lexer.token()
-    if not tok: 
-        break      # No more input
-    print(tok)
-
-
- -When executed, the example will produce the following output: - -
-
-$ python example.py
-LexToken(NUMBER,3,2,1)
-LexToken(PLUS,'+',2,3)
-LexToken(NUMBER,4,2,5)
-LexToken(TIMES,'*',2,7)
-LexToken(NUMBER,10,2,10)
-LexToken(PLUS,'+',3,14)
-LexToken(MINUS,'-',3,16)
-LexToken(NUMBER,20,3,18)
-LexToken(TIMES,'*',3,20)
-LexToken(NUMBER,2,3,21)
-
-
- -Lexers also support the iteration protocol. So, you can write the above loop as follows: - -
-
-for tok in lexer:
-    print(tok)
-
-
- -The tokens returned by lexer.token() are instances -of LexToken. This object has -attributes tok.type, tok.value, -tok.lineno, and tok.lexpos. The following code shows an example of -accessing these attributes: - -
-
-# Tokenize
-while True:
-    tok = lexer.token()
-    if not tok: 
-        break      # No more input
-    print(tok.type, tok.value, tok.lineno, tok.lexpos)
-
-
- -The tok.type and tok.value attributes contain the -type and value of the token itself. -tok.lineno and tok.lexpos contain information about -the location of the token. tok.lexpos is the index of the -token relative to the start of the input text. - -

4.2 The tokens list


-All lexers must provide a list tokens that defines all of the possible token -names that can be produced by the lexer. This list is always required -and is used to perform a variety of validation checks. The tokens list is also used by the -yacc.py module to identify terminals. -


-In the example, the following code specifies the token names:
-

-
-tokens = (
-   'NUMBER',
-   'PLUS',
-   'MINUS',
-   'TIMES',
-   'DIVIDE',
-   'LPAREN',
-   'RPAREN',
-)
-
-

4.3 Specification of tokens

-
-Each token is specified by writing a regular expression rule compatible with Python's re module.
-Each of these rules is defined by making a declaration with a special prefix t_ to indicate that it
-defines a token. For simple tokens, the regular expression can
-be specified as a string such as this (note: Python raw strings are used since they are the
-most convenient way to write regular expression strings):
-
-
-t_PLUS = r'\+'
-
-
- -In this case, the name following the t_ must exactly match one of the -names supplied in tokens. If some kind of action needs to be performed, -a token rule can be specified as a function. For example, this rule matches numbers and -converts the string into a Python integer. - -
-
-def t_NUMBER(t):
-    r'\d+'
-    t.value = int(t.value)
-    return t
-
-
- -When a function is used, the regular expression rule is specified in the function documentation string. -The function always takes a single argument which is an instance of -LexToken. This object has attributes of t.type which is the token type (as a string), -t.value which is the lexeme (the actual text matched), t.lineno which is the current line number, and t.lexpos which -is the position of the token relative to the beginning of the input text. -By default, t.type is set to the name following the t_ prefix. The action -function can modify the contents of the LexToken object as appropriate. However, -when it is done, the resulting token should be returned. If no value is returned by the action -function, the token is simply discarded and the next token read. - -

-Internally, lex.py uses the re module to do its pattern matching. Patterns are compiled -using the re.VERBOSE flag which can be used to help readability. However, be aware that unescaped -whitespace is ignored and comments are allowed in this mode. If your pattern involves whitespace, make sure you -use \s. If you need to match the # character, use [#]. -
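-
-As a small illustration of both points (the rule name t_INCLUDE and its
-pattern are hypothetical, not part of the example lexer), a pattern that
-needs literal whitespace and a literal # might be written as:
-
-t_INCLUDE = r'[#]include\s+<[^>]+>'   # [#] avoids starting a re.VERBOSE comment; \s+ matches the space
-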


-When building the master regular expression, -rules are added in the following order: -


-

    -
  1. All tokens defined by functions are added in the same order as they appear in the lexer file. -
  2. Tokens defined by strings are added next by sorting them in order of decreasing regular expression length (longer expressions -are added first). -
-

-Without this ordering, it can be difficult to correctly match certain types of tokens. For example, if you -wanted to have separate tokens for "=" and "==", you need to make sure that "==" is checked first. By sorting regular -expressions in order of decreasing length, this problem is solved for rules defined as strings. For functions, -the order can be explicitly controlled since rules appearing first are checked first. - -
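-
-For instance (a sketch; the token names EQ and ASSIGN are only
-illustrative), these two string rules can coexist because the longer
-pattern is added to the master regular expression first:
-
-t_ASSIGN = r'='      # added second: shorter pattern
-t_EQ     = r'=='     # added first: '==' is tried before '='
-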

-To handle reserved words, you should write a single rule to match an -identifier and do a special name lookup in a function like this: - -

-
-reserved = {
-   'if' : 'IF',
-   'then' : 'THEN',
-   'else' : 'ELSE',
-   'while' : 'WHILE',
-   ...
-}
-
-tokens = ['LPAREN','RPAREN',...,'ID'] + list(reserved.values())
-
-def t_ID(t):
-    r'[a-zA-Z_][a-zA-Z_0-9]*'
-    t.type = reserved.get(t.value,'ID')    # Check for reserved words
-    return t
-
-
- -This approach greatly reduces the number of regular expression rules and is likely to make things a little faster. - -

-Note: You should avoid writing individual rules for reserved words. For example, if you write rules like this, - -

-
-t_FOR   = r'for'
-t_PRINT = r'print'
-
-
- -those rules will be triggered for identifiers that include those words as a prefix such as "forget" or "printed". This is probably not -what you want. - -

4.4 Token values

-
-When tokens are returned by lex, they have a value that is stored in the value attribute. Normally, the value is the text
-that was matched. However, the value can be assigned to any Python object. For instance, when lexing identifiers, you may
-want to return both the identifier name and information from some sort of symbol table. To do this, you might write a rule like this:
-
-
-def t_ID(t):
-    ...
-    # Look up symbol table information and return a tuple
-    t.value = (t.value, symbol_lookup(t.value))
-    ...
-    return t
-
-
- -It is important to note that storing data in other attribute names is not recommended. The yacc.py module only exposes the -contents of the value attribute. Thus, accessing other attributes may be unnecessarily awkward. If you -need to store multiple values on a token, assign a tuple, dictionary, or instance to value. - -

4.5 Discarded tokens

-
-To discard a token, such as a comment, simply define a token rule that returns no value. For example:
-
-
-def t_COMMENT(t):
-    r'\#.*'
-    pass
-    # No return value. Token discarded
-
-
- -Alternatively, you can include the prefix "ignore_" in the token declaration to force a token to be ignored. For example: - -
-
-t_ignore_COMMENT = r'\#.*'
-
-
- -Be advised that if you are ignoring many different kinds of text, you may still want to use functions since these provide more precise -control over the order in which regular expressions are matched (i.e., functions are matched in order of specification whereas strings are -sorted by regular expression length). - -
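-
-For instance (a sketch using C-style comments as the ignored text; the
-rule names would also need to appear in the tokens list), function rules
-are always tried in the order they appear, so the block-comment rule
-below can never be shadowed by the line-comment rule:
-
-def t_BLOCKCOMMENT(t):
-    r'/\*(.|\n)*?\*/'
-    pass                 # discarded: nothing is returned
-
-def t_LINECOMMENT(t):
-    r'//.*'
-    pass                 # discarded
-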

4.6 Line numbers and positional information


-By default, lex.py knows nothing about line numbers. This is because lex.py doesn't know anything
-about what constitutes a "line" of input (e.g., the newline character or even if the input is textual data).
-To update this information, you need to write a special rule. In the example, the t_newline() rule shows how to do this.
-

-
-# Define a rule so we can track line numbers
-def t_newline(t):
-    r'\n+'
-    t.lexer.lineno += len(t.value)
-
-
-Within the rule, the lineno attribute of the underlying lexer t.lexer is updated. -After the line number is updated, the token is simply discarded since nothing is returned. - -

-lex.py does not perform any kind of automatic column tracking. However, it does record positional -information related to each token in the lexpos attribute. Using this, it is usually possible to compute -column information as a separate step. For instance, just count backwards until you reach a newline. - -

-
-# Compute column.
-#     input is the input text string
-#     token is a token instance
-def find_column(input, token):
-    line_start = input.rfind('\n', 0, token.lexpos) + 1
-    return (token.lexpos - line_start) + 1
-
-
- -Since column information is often only useful in the context of error handling, calculating the column -position can be performed when needed as opposed to doing it for each token. - -
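-
-For example (a sketch reusing the find_column() helper above), the error
-rule could report a column number like this:
-
-def t_error(t):
-    col = find_column(t.lexer.lexdata, t)   # lexer.lexdata holds the full input string
-    print("Illegal character '%s' at column %d" % (t.value[0], col))
-    t.lexer.skip(1)
-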

4.7 Ignored characters


-The special t_ignore rule is reserved by lex.py for characters -that should be completely ignored in the input stream. -Usually this is used to skip over whitespace and other non-essential characters. -Although it is possible to define a regular expression rule for whitespace in a manner -similar to t_newline(), the use of t_ignore provides substantially better -lexing performance because it is handled as a special case and is checked in a much -more efficient manner than the normal regular expression rules. -


-The characters given in t_ignore are not ignored when such characters are part of -other regular expression patterns. For example, if you had a rule to capture quoted text, -that pattern can include the ignored characters (which will be captured in the normal way). The -main purpose of t_ignore is to ignore whitespace and other padding between the -tokens that you actually want to parse. -


4.8 Literal characters


-Literal characters can be specified by defining a variable literals in your lexing module. For example: - -

-
-literals = [ '+','-','*','/' ]
-
-
- -or alternatively - -
-
-literals = "+-*/"
-
-
- -A literal character is simply a single character that is returned "as is" when encountered by the lexer. Literals are checked -after all of the defined regular expression rules. Thus, if a rule starts with one of the literal characters, it will always -take precedence. - -

-When a literal token is returned, both its type and value attributes are set to the character itself. For example, '+'. -


-It's possible to write token functions that perform additional actions -when literals are matched. However, you'll need to set the token type -appropriately. For example: -

-
-literals = [ '{', '}' ]
-
-def t_lbrace(t):
-    r'\{'
-    t.type = '{'      # Set token type to the expected literal
-    return t
-
-def t_rbrace(t):
-    r'\}'
-    t.type = '}'      # Set token type to the expected literal
-    return t
-
-

4.9 Error handling


-The t_error() -function is used to handle lexing errors that occur when illegal -characters are detected. In this case, the t.value attribute contains the -rest of the input string that has not been tokenized. In the example, the error function -was defined as follows: - -

-
-# Error handling rule
-def t_error(t):
-    print("Illegal character '%s'" % t.value[0])
-    t.lexer.skip(1)
-
-
- -In this case, we simply print the offending character and skip ahead one character by calling t.lexer.skip(1). - -

4.10 EOF Handling


-The t_eof() function is used to handle an end-of-file (EOF) condition in the input. As input, it
-receives a token type 'eof' with the lineno and lexpos attributes set appropriately.
-The main use of this function is to provide more input to the lexer so that it can continue to parse. Here is an
-example of how this works:

-
-# EOF handling rule
-def t_eof(t):
-    # Get more input (Example)
-    more = input('... ')
-    if more:
-        t.lexer.input(more)
-        return t.lexer.token()
-    return None
-
-
- -

-The EOF function should return the next available token (by calling t.lexer.token()) or None to
-indicate no more data. Be aware that setting more input with the t.lexer.input() method does
-NOT reset the lexer state or the lineno attribute used for position tracking. The lexpos
-attribute is reset, so be aware of that if you're using it in error reporting.
-


4.11 Building and using the lexer


-To build the lexer, the function lex.lex() is used. For example:

-
-lexer = lex.lex()
-
-

-This function
-uses Python reflection (or introspection) to read the regular expression rules
-out of the calling context and build the lexer. Once the lexer has been built, two methods can
-be used to control the lexer.
-

-
    -
  • lexer.input(data). Reset the lexer and store a new input string. -
  • lexer.token(). Return the next token. Returns a special LexToken instance on success or -None if the end of the input text has been reached. -

4.12 The @TOKEN decorator

-
-In some applications, you may want to build tokens from a series of
-more complex regular expression rules. For example:
-
-
-digit            = r'([0-9])'
-nondigit         = r'([_A-Za-z])'
-identifier       = r'(' + nondigit + r'(' + digit + r'|' + nondigit + r')*)'        
-
-def t_ID(t):
-    # want docstring to be identifier above. ?????
-    ...
-
-
- -In this case, we want the regular expression rule for ID to be one of the variables above. However, there is no -way to directly specify this using a normal documentation string. To solve this problem, you can use the @TOKEN -decorator. For example: - -
-
-from ply.lex import TOKEN
-
-@TOKEN(identifier)
-def t_ID(t):
-    ...
-
-

-This will attach identifier to the docstring for t_ID() allowing lex.py to work normally. -
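-
-Putting the pieces together (a sketch that reuses the identifier pattern
-defined above; the rule body is illustrative), a complete rule might look
-like this:
-
-from ply.lex import TOKEN
-
-digit      = r'([0-9])'
-nondigit   = r'([_A-Za-z])'
-identifier = r'(' + nondigit + r'(' + digit + r'|' + nondigit + r')*)'
-
-@TOKEN(identifier)
-def t_ID(t):
-    # The docstring of t_ID is now the identifier pattern above
-    return t
-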


4.13 Optimized mode

-
-For improved performance, it may be desirable to use Python's
-optimized mode (e.g., running Python with the -O
-option). However, doing so causes Python to ignore documentation
-strings. This presents special problems for lex.py. To
-handle this case, you can create your lexer using
-the optimize option as follows:
-
-
-lexer = lex.lex(optimize=1)
-
-
- -Next, run Python in its normal operating mode. When you do -this, lex.py will write a file called lextab.py in -the same directory as the module containing the lexer specification. -This file contains all of the regular -expression rules and tables used during lexing. On subsequent -executions, -lextab.py will simply be imported to build the lexer. This -approach substantially improves the startup time of the lexer and it -works in Python's optimized mode. - -

-To change the name of the lexer-generated module, use the lextab keyword argument. For example: -

-
-lexer = lex.lex(optimize=1,lextab="footab")
-
-
- -When running in optimized mode, it is important to note that lex disables most error checking. Thus, this is really only recommended -if you're sure everything is working correctly and you're ready to start releasing production code. - -

4.14 Debugging

-
-For the purpose of debugging, you can run lex() in a debugging mode as follows:
-
-
-lexer = lex.lex(debug=1)
-
-
- -

-This will produce various sorts of debugging information, including all of the added rules,
-the master regular expressions used by the lexer, and the tokens generated during lexing.
-


-In addition, lex.py comes with a simple main function which -will either tokenize input read from standard input or from a file specified -on the command line. To use it, simply put this in your lexer: -

-
-if __name__ == '__main__':
-     lex.runmain()
-
-
- -Please refer to the "Debugging" section near the end for some more advanced details -of debugging. - -

4.15 Alternative specification of lexers

-
-As shown in the example, lexers are specified all within one Python module. If you want to
-put token rules in a different module from the one in which you invoke lex(), use the
-module keyword argument.
-

-For example, you might have a dedicated module that just contains -the token rules: - -

-
-# module: tokrules.py
-# This module just contains the lexing rules
-
-# List of token names.   This is always required
-tokens = (
-   'NUMBER',
-   'PLUS',
-   'MINUS',
-   'TIMES',
-   'DIVIDE',
-   'LPAREN',
-   'RPAREN',
-)
-
-# Regular expression rules for simple tokens
-t_PLUS    = r'\+'
-t_MINUS   = r'-'
-t_TIMES   = r'\*'
-t_DIVIDE  = r'/'
-t_LPAREN  = r'\('
-t_RPAREN  = r'\)'
-
-# A regular expression rule with some action code
-def t_NUMBER(t):
-    r'\d+'
-    t.value = int(t.value)    
-    return t
-
-# Define a rule so we can track line numbers
-def t_newline(t):
-    r'\n+'
-    t.lexer.lineno += len(t.value)
-
-# A string containing ignored characters (spaces and tabs)
-t_ignore  = ' \t'
-
-# Error handling rule
-def t_error(t):
-    print("Illegal character '%s'" % t.value[0])
-    t.lexer.skip(1)
-
-
-
-Now, if you wanted to build a tokenizer using these rules from within a different module, you would do the following (shown for Python interactive mode):
-
-
->>> import tokrules
->>> lexer = lex.lex(module=tokrules)
->>> lexer.input("3 + 4")
->>> lexer.token()
-LexToken(NUMBER,3,1,0)
->>> lexer.token()
-LexToken(PLUS,'+',1,2)
->>> lexer.token()
-LexToken(NUMBER,4,1,4)
->>> lexer.token()
-None
->>>
-
-
- -The module option can also be used to define lexers from instances of a class. For example: - -
-
-import ply.lex as lex
-
-class MyLexer(object):
-    # List of token names.   This is always required
-    tokens = (
-       'NUMBER',
-       'PLUS',
-       'MINUS',
-       'TIMES',
-       'DIVIDE',
-       'LPAREN',
-       'RPAREN',
-    )
-
-    # Regular expression rules for simple tokens
-    t_PLUS    = r'\+'
-    t_MINUS   = r'-'
-    t_TIMES   = r'\*'
-    t_DIVIDE  = r'/'
-    t_LPAREN  = r'\('
-    t_RPAREN  = r'\)'
-
-    # A regular expression rule with some action code
-    # Note addition of self parameter since we're in a class
-    def t_NUMBER(self,t):
-        r'\d+'
-        t.value = int(t.value)    
-        return t
-
-    # Define a rule so we can track line numbers
-    def t_newline(self,t):
-        r'\n+'
-        t.lexer.lineno += len(t.value)
-
-    # A string containing ignored characters (spaces and tabs)
-    t_ignore  = ' \t'
-
-    # Error handling rule
-    def t_error(self,t):
-        print("Illegal character '%s'" % t.value[0])
-        t.lexer.skip(1)
-
-    # Build the lexer
-    def build(self,**kwargs):
-        self.lexer = lex.lex(module=self, **kwargs)
-    
-    # Test it out
-    def test(self,data):
-        self.lexer.input(data)
-        while True:
-             tok = self.lexer.token()
-             if not tok: 
-                 break
-             print(tok)
-
-# Build the lexer and try it out
-m = MyLexer()
-m.build()           # Build the lexer
-m.test("3 + 4")     # Test it
-
-
-
-When building a lexer from a class, you should construct the lexer from
-an instance of the class, not the class object itself. This is because
-PLY only works properly if the lexer actions are defined as bound methods.
-

-When using the module option to lex(), PLY collects symbols -from the underlying object using the dir() function. There is no -direct access to the __dict__ attribute of the object supplied as a -module value.


-Finally, if you want to keep things nicely encapsulated, but don't want to use a -full-fledged class definition, lexers can be defined using closures. For example: - -

-
-import ply.lex as lex
-
-# List of token names.   This is always required
-tokens = (
-  'NUMBER',
-  'PLUS',
-  'MINUS',
-  'TIMES',
-  'DIVIDE',
-  'LPAREN',
-  'RPAREN',
-)
-
-def MyLexer():
-    # Regular expression rules for simple tokens
-    t_PLUS    = r'\+'
-    t_MINUS   = r'-'
-    t_TIMES   = r'\*'
-    t_DIVIDE  = r'/'
-    t_LPAREN  = r'\('
-    t_RPAREN  = r'\)'
-
-    # A regular expression rule with some action code
-    def t_NUMBER(t):
-        r'\d+'
-        t.value = int(t.value)    
-        return t
-
-    # Define a rule so we can track line numbers
-    def t_newline(t):
-        r'\n+'
-        t.lexer.lineno += len(t.value)
-
-    # A string containing ignored characters (spaces and tabs)
-    t_ignore  = ' \t'
-
-    # Error handling rule
-    def t_error(t):
-        print("Illegal character '%s'" % t.value[0])
-        t.lexer.skip(1)
-
-    # Build the lexer from my environment and return it    
-    return lex.lex()
-
-
- -

-Important note: If you are defining a lexer using a class or closure, be aware that PLY still requires you to only
-define a single lexer per module (source file). PLY performs extensive validation and error checking that
-may falsely report errors if you don't follow this rule.
-


4.16 Maintaining state

-
-In your lexer, you may want to maintain a variety of state
-information. This might include mode settings, symbol tables, and
-other details. As an example, suppose that you wanted to keep
-track of how many NUMBER tokens had been encountered.
-

-One way to do this is to keep a set of global variables in the module -where you created the lexer. For example: - -

-
-num_count = 0
-def t_NUMBER(t):
-    r'\d+'
-    global num_count
-    num_count += 1
-    t.value = int(t.value)    
-    return t
-
-
-If you don't like the use of a global variable, another place to store
-information is inside the Lexer object created by lex().
-To do this, you can use the lexer attribute of tokens passed to
-the various rules. For example:
-
-
-def t_NUMBER(t):
-    r'\d+'
-    t.lexer.num_count += 1     # Note use of lexer attribute
-    t.value = int(t.value)    
-    return t
-
-lexer = lex.lex()
-lexer.num_count = 0            # Set the initial count
-
-
-This latter approach has the advantage of being simple and working
-correctly in applications where multiple instantiations of a given
-lexer exist in the same application. However, this might also feel
-like a gross violation of encapsulation to OO purists.
-Just to put your mind at some ease, all
-internal attributes of the lexer (with the exception of lineno) have names that are prefixed
-by lex (e.g., lexdata, lexpos, etc.). Thus,
-it is perfectly safe to store attributes in the lexer that
-don't have names starting with that prefix and that don't conflict with one of the
-predefined methods (e.g., input(), token(), etc.).
-

-If you don't like assigning values on the lexer object, you can define your lexer as a class as -shown in the previous section: - -

-
-class MyLexer:
-    ...
-    def t_NUMBER(self,t):
-        r'\d+'
-        self.num_count += 1
-        t.value = int(t.value)    
-        return t
-
-    def build(self, **kwargs):
-        self.lexer = lex.lex(object=self,**kwargs)
-
-    def __init__(self):
-        self.num_count = 0
-
-
- -The class approach may be the easiest to manage if your application is -going to be creating multiple instances of the same lexer and you need -to manage a lot of state. - -

-State can also be managed through closures. For example, in Python 3: - -

-
-def MyLexer():
-    num_count = 0
-    ...
-    def t_NUMBER(t):
-        r'\d+'
-        nonlocal num_count
-        num_count += 1
-        t.value = int(t.value)    
-        return t
-    ...
-
-

4.17 Lexer cloning


-If necessary, a lexer object can be duplicated by invoking its clone() method. For example: - -

-
-lexer = lex.lex()
-...
-newlexer = lexer.clone()
-
-
-
-When a lexer is cloned, the copy is identical to the original lexer,
-including any input text and internal state. However, the clone allows a
-different set of input text to be supplied which may be processed separately.
-This may be useful in situations when you are writing a parser/compiler that
-involves recursive or reentrant processing. For instance, if you
-needed to scan ahead in the input for some reason, you could create a
-clone and use it to look ahead. Or, if you were implementing some kind of preprocessor,
-cloned lexers could be used to handle different input files.
-
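-For example (a sketch of the look-ahead use case), tokens consumed from a
-clone do not advance the original lexer:
-
-lookahead = lexer.clone()
-next_tok = lookahead.token()   # peek at the upcoming token; 'lexer' is undisturbed
-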

-Creating a clone is different from calling lex.lex() in that
-PLY doesn't regenerate any of the internal tables or regular expressions.
-

-Special considerations need to be made when cloning lexers that also -maintain their own internal state using classes or closures. Namely, -you need to be aware that the newly created lexers will share all of -this state with the original lexer. For example, if you defined a -lexer as a class and did this: - -

-
-m = MyLexer()
-a = lex.lex(object=m)      # Create a lexer
-
-b = a.clone()              # Clone the lexer
-
-
- -Then both a and b are going to be bound to the same -object m and any changes to m will be reflected in both lexers. It's -important to emphasize that clone() is only meant to create a new lexer -that reuses the regular expressions and environment of another lexer. If you -need to make a totally new copy of a lexer, then call lex() again. - -

4.18 Internal lexer state

-
-A Lexer object lexer has a number of internal attributes that may be useful in certain
-situations.
-

-lexer.lexpos -

-This attribute is an integer that contains the current position within the input text. If you modify -the value, it will change the result of the next call to token(). Within token rule functions, this points -to the first character after the matched text. If the value is modified within a rule, the next returned token will be -matched at the new position. -

-lexer.lineno -

-The current value of the line number attribute stored in the lexer. PLY only specifies that the attribute -exists---it never sets, updates, or performs any processing with it. If you want to track line numbers, -you will need to add code yourself (see the section on line numbers and positional information). -

-lexer.lexdata -

-The current input text stored in the lexer. This is the string passed with the input() method. It -would probably be a bad idea to modify this unless you really know what you're doing. -

-lexer.lexmatch -

-This is the raw Match object returned by the Python re.match() function (used internally by PLY) for the -current token. If you have written a regular expression that contains named groups, you can use this to retrieve those values. -Note: This attribute is only updated when tokens are defined and processed by functions. -
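-
-For example (a sketch; the group name 'val' is illustrative), a function
-rule containing a named group can retrieve that group through lexmatch:
-
-def t_NUMBER(t):
-    r'(?P<val>\d+)'
-    t.value = int(t.lexer.lexmatch.group('val'))
-    return t
-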

4.19 Conditional lexing and start conditions

-
-In advanced parsing applications, it may be useful to have different
-lexing states. For instance, you may want the occurrence of a certain
-token or syntactic construct to trigger a different kind of lexing.
-PLY supports a feature that allows the underlying lexer to be put into
-a series of different states. Each state can have its own tokens,
-lexing rules, and so forth. The implementation is based largely on
-the "start condition" feature of GNU flex. Details of this can be found
-at http://flex.sourceforge.net/manual/Start-Conditions.html.
-

-To define a new lexing state, it must first be declared. This is done by including a "states" declaration in your -lex file. For example: - -

-
-states = (
-   ('foo','exclusive'),
-   ('bar','inclusive'),
-)
-
-
-
-This declaration declares two states, 'foo'
-and 'bar'. States may be of two types: 'exclusive'
-and 'inclusive'. An exclusive state completely overrides the
-default behavior of the lexer. That is, lex will only return tokens
-and apply rules defined specifically for that state. An inclusive
-state adds additional tokens and rules to the default set of rules.
-Thus, lex will return the tokens defined by default in addition
-to those defined for the inclusive state.
-

-Once a state has been declared, tokens and rules are declared by including the
-state name in the token/rule declaration. For example:
-

-
-t_foo_NUMBER = r'\d+'                      # Token 'NUMBER' in state 'foo'        
-t_bar_ID     = r'[a-zA-Z_][a-zA-Z0-9_]*'   # Token 'ID' in state 'bar'
-
-def t_foo_newline(t):
-    r'\n'
-    t.lexer.lineno += 1
-
-
- -A token can be declared in multiple states by including multiple state names in the declaration. For example: - -
-
-t_foo_bar_NUMBER = r'\d+'         # Defines token 'NUMBER' in both state 'foo' and 'bar'
-
-
-
-Alternatively, a token can be declared in all states by using 'ANY' in the name.
-
-
-t_ANY_NUMBER = r'\d+'         # Defines a token 'NUMBER' in all states
-
-
- -If no state name is supplied, as is normally the case, the token is associated with a special state 'INITIAL'. For example, -these two declarations are identical: - -
-
-t_NUMBER = r'\d+'
-t_INITIAL_NUMBER = r'\d+'
-
-
- -

-States are also associated with the special t_ignore, t_error(), and t_eof() declarations. For example, if a state treats -these differently, you can declare:

-
-t_foo_ignore = " \t\n"       # Ignored characters for state 'foo'
-
-def t_bar_error(t):          # Special error handler for state 'bar'
-    pass 
-
-
- -By default, lexing operates in the 'INITIAL' state. This state includes all of the normally defined tokens. -For users who aren't using different states, this fact is completely transparent. If, during lexing or parsing, you want to change -the lexing state, use the begin() method. For example: - -
-
-def t_begin_foo(t):
-    r'start_foo'
-    t.lexer.begin('foo')             # Starts 'foo' state
-
-
- -To get out of a state, you use begin() to switch back to the initial state. For example: - -
-
-def t_foo_end(t):
-    r'end_foo'
-    t.lexer.begin('INITIAL')        # Back to the initial state
-
-
- -The management of states can also be done with a stack. For example: - -
-
-def t_begin_foo(t):
-    r'start_foo'
-    t.lexer.push_state('foo')             # Starts 'foo' state
-
-def t_foo_end(t):
-    r'end_foo'
-    t.lexer.pop_state()                   # Back to the previous state
-
-

-The use of a stack would be useful in situations where there are many ways of entering a new lexing state and you merely want to go back -to the previous state afterwards. - -

-An example might help clarify. Suppose you were writing a parser and you wanted to grab sections of arbitrary C code enclosed by -curly braces. That is, whenever you encounter a starting brace '{', you want to read all of the enclosed code up to the ending brace '}' -and return it as a string. Doing this with a normal regular expression rule is nearly (if not actually) impossible. This is because braces can -be nested and can be included in comments and strings. Thus, simply matching up to the first matching '}' character isn't good enough. Here is how -you might use lexer states to do this: - -

-
-# Declare the state
-states = (
-  ('ccode','exclusive'),
-)
-
-# Match the first {. Enter ccode state.
-def t_ccode(t):
-    r'\{'
-    t.lexer.code_start = t.lexer.lexpos        # Record the starting position
-    t.lexer.level = 1                          # Initial brace level
-    t.lexer.begin('ccode')                     # Enter 'ccode' state
-
-# Rules for the ccode state
-def t_ccode_lbrace(t):     
-    r'\{'
-    t.lexer.level +=1                
-
-def t_ccode_rbrace(t):
-    r'\}'
-    t.lexer.level -=1
-
-    # If closing brace, return the code fragment
-    if t.lexer.level == 0:
-         t.value = t.lexer.lexdata[t.lexer.code_start:t.lexer.lexpos+1]
-         t.type = "CCODE"
-         t.lexer.lineno += t.value.count('\n')
-         t.lexer.begin('INITIAL')           
-         return t
-
-# C or C++ comment (ignore)    
-def t_ccode_comment(t):
-    r'(/\*(.|\n)*?\*/)|(//.*)'
-    pass
-
-# C string
-def t_ccode_string(t):
-   r'\"([^\\\n]|(\\.))*?\"'
-
-# C character literal
-def t_ccode_char(t):
-   r'\'([^\\\n]|(\\.))*?\''
-
-# Any sequence of non-whitespace characters (not braces, strings)
-def t_ccode_nonspace(t):
-   r'[^\s\{\}\'\"]+'
-
-# Ignored characters (whitespace)
-t_ccode_ignore = " \t\n"
-
-# For bad characters, we just skip over it
-def t_ccode_error(t):
-    t.lexer.skip(1)
-
-
-
-In this example, the occurrence of the first '{' causes the lexer to record the starting position and enter a new state 'ccode'. A collection of rules then match
-various parts of the input that follow (comments, strings, etc.). All of these rules merely discard the token (by not returning a value).
-However, if the closing right brace is encountered, the rule t_ccode_rbrace collects all of the code (using the earlier recorded starting
-position), stores it, and returns a token 'CCODE' containing all of that text. When returning the token, the lexing state is restored to its
-initial state.
-

4.20 Miscellaneous Issues


-

  • The lexer requires input to be supplied as a single input string. Since most machines have more than enough memory, this
-rarely presents a performance concern. However, it means that the lexer currently can't be used with streaming data
-such as open files or sockets. This limitation is primarily a side-effect of using the re module. You might be
-able to work around this by implementing an appropriate def t_eof() end-of-file handling rule. The main complication
-here is that you'll probably need to ensure that data is fed to the lexer in a way that doesn't split it in the middle
-of a token.


    -

  • The lexer should work properly with Unicode strings, both in the
-token and pattern matching rules and in the input text.
-

    -

  • If you need to supply optional flags to the re.compile() function, use the reflags option to lex. For example: - -
    -
    -lex.lex(reflags=re.UNICODE | re.VERBOSE)
    -
    -
    - -Note: by default, reflags is set to re.VERBOSE. If you provide -your own flags, you may need to include this for PLY to preserve its normal behavior. - -

    -

  • Since the lexer is written entirely in Python, its performance is
-largely determined by that of the Python re module. Although
-the lexer has been written to be as efficient as possible, it's not
-blazingly fast when used on very large input files. If
-performance is a concern, you might consider upgrading to the most
-recent version of Python, creating a hand-written lexer, or offloading
-the lexer into a C extension module.
-

    -If you are going to create a hand-written lexer and you plan to use it with yacc.py,
-it only needs to conform to the following requirements (a minimal sketch appears after this list):

      -
    • It must provide a token() method that returns the next token or None if no more -tokens are available. -
    • The token() method must return an object tok that has type and value attributes. If -line number tracking is being used, then the token should also define a lineno attribute. -
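    -
    -Here is a minimal sketch of such a hand-written lexer (the Tok and
    -HandLexer names are hypothetical; only the token() method and the
    -type/value/lineno attributes are requirements of yacc.py):
    -
    -class Tok(object):
    -    # Minimal token object: just the attributes yacc.py looks for
    -    def __init__(self, type, value, lineno):
    -        self.type = type
    -        self.value = value
    -        self.lineno = lineno
    -
    -class HandLexer(object):
    -    # Hand-written lexer: hands out tokens from a pre-built sequence
    -    def __init__(self, toks):
    -        self._toks = iter(toks)
    -    def token(self):
    -        return next(self._toks, None)    # None signals end of input
    -
    -# Usage sketch: toks = [Tok('NUMBER', 3, 1), Tok('PLUS', '+', 1), Tok('NUMBER', 4, 1)]
    -# result = parser.parse("3 + 4", lexer=HandLexer(toks))
    -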

    5. Parsing basics

    -
    -yacc.py is used to parse language syntax. Before showing an
-example, there are a few important bits of background that must be
-mentioned. First, syntax is usually specified in terms of a BNF grammar.
-For example, if you wanted to parse
-simple arithmetic expressions, you might first write an unambiguous
-grammar specification like this:
-
    -
     
    -expression : expression + term
    -           | expression - term
    -           | term
    -
    -term       : term * factor
    -           | term / factor
    -           | factor
    -
    -factor     : NUMBER
    -           | ( expression )
    -
    -
    -
    -In the grammar, symbols such as NUMBER, +, -, *, and / are known
-as terminals and correspond to raw input tokens. Identifiers such as term and factor refer to
-grammar rules composed of a collection of terminals and other rules. These identifiers are known as non-terminals.

    - -The semantic behavior of a language is often specified using a -technique known as syntax directed translation. In syntax directed -translation, attributes are attached to each symbol in a given grammar -rule along with an action. Whenever a particular grammar rule is -recognized, the action describes what to do. For example, given the -expression grammar above, you might write the specification for a -simple calculator like this: - -

    -
     
    -Grammar                             Action
    ---------------------------------    -------------------------------------------- 
    -expression0 : expression1 + term    expression0.val = expression1.val + term.val
    -            | expression1 - term    expression0.val = expression1.val - term.val
    -            | term                  expression0.val = term.val
    -
    -term0       : term1 * factor        term0.val = term1.val * factor.val
    -            | term1 / factor        term0.val = term1.val / factor.val
    -            | factor                term0.val = factor.val
    -
    -factor      : NUMBER                factor.val = int(NUMBER.lexval)
    -            | ( expression )        factor.val = expression.val
    -
    -
    - -A good way to think about syntax directed translation is to -view each symbol in the grammar as a kind of object. Associated -with each symbol is a value representing its "state" (for example, the -val attribute above). Semantic -actions are then expressed as a collection of functions or methods -that operate on the symbols and associated values. - -

    -Yacc uses a parsing technique known as LR-parsing or shift-reduce parsing. LR parsing is a -bottom up technique that tries to recognize the right-hand-side of various grammar rules. -Whenever a valid right-hand-side is found in the input, the appropriate action code is triggered and the -grammar symbols are replaced by the grammar symbol on the left-hand-side. - -

    -LR parsing is commonly implemented by shifting grammar symbols onto a -stack and looking at the stack and the next input token for patterns that -match one of the grammar rules. -The details of the algorithm can be found in a compiler textbook, but the -following example illustrates the steps that are performed if you -wanted to parse the expression -3 + 5 * (10 - 20) using the grammar defined above. In the example, -the special symbol $ represents the end of input. - - -

    -
    -Step Symbol Stack           Input Tokens            Action
    ----- ---------------------  ---------------------   -------------------------------
    -1                           3 + 5 * ( 10 - 20 )$    Shift 3
    -2    3                        + 5 * ( 10 - 20 )$    Reduce factor : NUMBER
    -3    factor                   + 5 * ( 10 - 20 )$    Reduce term   : factor
    -4    term                     + 5 * ( 10 - 20 )$    Reduce expr : term
    -5    expr                     + 5 * ( 10 - 20 )$    Shift +
    -6    expr +                     5 * ( 10 - 20 )$    Shift 5
    -7    expr + 5                     * ( 10 - 20 )$    Reduce factor : NUMBER
    -8    expr + factor                * ( 10 - 20 )$    Reduce term   : factor
    -9    expr + term                  * ( 10 - 20 )$    Shift *
    -10   expr + term *                  ( 10 - 20 )$    Shift (
    -11   expr + term * (                  10 - 20 )$    Shift 10
    -12   expr + term * ( 10                  - 20 )$    Reduce factor : NUMBER
    -13   expr + term * ( factor              - 20 )$    Reduce term : factor
    -14   expr + term * ( term                - 20 )$    Reduce expr : term
    -15   expr + term * ( expr                - 20 )$    Shift -
    -16   expr + term * ( expr -                20 )$    Shift 20
    -17   expr + term * ( expr - 20                )$    Reduce factor : NUMBER
    -18   expr + term * ( expr - factor            )$    Reduce term : factor
    -19   expr + term * ( expr - term              )$    Reduce expr : expr - term
    -20   expr + term * ( expr                     )$    Shift )
    -21   expr + term * ( expr )                    $    Reduce factor : (expr)
    -22   expr + term * factor                      $    Reduce term : term * factor
    -23   expr + term                               $    Reduce expr : expr + term
    -24   expr                                      $    Reduce expr
    -25                                             $    Success!
    -
    -
    - -When parsing the expression, an underlying state machine and the -current input token determine what happens next. If the next token -looks like part of a valid grammar rule (based on other items on the -stack), it is generally shifted onto the stack. If the top of the -stack contains a valid right-hand-side of a grammar rule, it is -usually "reduced" and the symbols replaced with the symbol on the -left-hand-side. When this reduction occurs, the appropriate action is -triggered (if defined). If the input token can't be shifted and the -top of stack doesn't match any grammar rules, a syntax error has -occurred and the parser must take some kind of recovery step (or bail -out). A parse is only successful if the parser reaches a state where -the symbol stack is empty and there are no more input tokens. - -

    -It is important to note that the underlying implementation is built -around a large finite-state machine that is encoded in a collection of -tables. The construction of these tables is non-trivial and -beyond the scope of this discussion. However, subtle details of this -process explain why, in the example above, the parser chooses to shift -a token onto the stack in step 9 rather than reducing the -rule expr : expr + term. - -

    6. Yacc

    -
    -The ply.yacc module implements the parsing component of PLY.
-The name "yacc" stands for "Yet Another Compiler Compiler" and is
-borrowed from the Unix tool of the same name.
-

    6.1 An example

    -
    -Suppose you wanted to make a grammar for simple arithmetic expressions as previously described. Here is
-how you would do it with yacc.py:
-
    -
    -# Yacc example
    -
    -import ply.yacc as yacc
    -
    -# Get the token map from the lexer.  This is required.
    -from calclex import tokens
    -
    -def p_expression_plus(p):
    -    'expression : expression PLUS term'
    -    p[0] = p[1] + p[3]
    -
    -def p_expression_minus(p):
    -    'expression : expression MINUS term'
    -    p[0] = p[1] - p[3]
    -
    -def p_expression_term(p):
    -    'expression : term'
    -    p[0] = p[1]
    -
    -def p_term_times(p):
    -    'term : term TIMES factor'
    -    p[0] = p[1] * p[3]
    -
    -def p_term_div(p):
    -    'term : term DIVIDE factor'
    -    p[0] = p[1] / p[3]
    -
    -def p_term_factor(p):
    -    'term : factor'
    -    p[0] = p[1]
    -
    -def p_factor_num(p):
    -    'factor : NUMBER'
    -    p[0] = p[1]
    -
    -def p_factor_expr(p):
    -    'factor : LPAREN expression RPAREN'
    -    p[0] = p[2]
    -
    -# Error rule for syntax errors
    -def p_error(p):
    -    print("Syntax error in input!")
    -
    -# Build the parser
    -parser = yacc.yacc()
    -
    -while True:
    -   try:
    -       s = input('calc > ')
    -   except EOFError:
    -       break
    -   if not s: continue
    -   result = parser.parse(s)
    -   print(result)
    -
    -
    - -In this example, each grammar rule is defined by a Python function -where the docstring to that function contains the appropriate -context-free grammar specification. The statements that make up the -function body implement the semantic actions of the rule. Each function -accepts a single argument p that is a sequence containing the -values of each grammar symbol in the corresponding rule. The values -of p[i] are mapped to grammar symbols as shown here: - -
    -
    -def p_expression_plus(p):
    -    'expression : expression PLUS term'
    -    #   ^            ^        ^    ^
    -    #  p[0]         p[1]     p[2] p[3]
    -
    -    p[0] = p[1] + p[3]
    -
    -

    -For tokens, the "value" of the corresponding p[i] is the
-same as the p.value attribute assigned in the lexer
-module. For non-terminals, the value is determined by whatever is
-placed in p[0] when rules are reduced. This value can be
-anything at all. However, it is probably most common for the value to be
-a simple Python type, a tuple, or an instance. In this example, we
-are relying on the fact that the NUMBER token stores an
-integer value in its value field. All of the other rules simply
-perform various types of integer operations and propagate the result.
-


    -Note: The use of negative indices has a special meaning in
-yacc---specifically, p[-1] does not have the same value
-as p[3] in this example. Please see the section on "Embedded
-Actions" for further details.
-


    -The first rule defined in the yacc specification determines the -starting grammar symbol (in this case, a rule for expression -appears first). Whenever the starting rule is reduced by the parser -and no more input is available, parsing stops and the final value is -returned (this value will be whatever the top-most rule placed -in p[0]). Note: an alternative starting symbol can be -specified using the start keyword argument to -yacc(). - -

    -The p_error(p) rule is defined to catch syntax errors.
-See the error handling section below for more detail.
-

    -To build the parser, call the yacc.yacc() function. This -function looks at the module and attempts to construct all of the LR -parsing tables for the grammar you have specified. The first -time yacc.yacc() is invoked, you will get a message such as -this: - -

    -
    -$ python calcparse.py
    -Generating LALR tables
    -calc > 
    -
    -

    -Since table construction is relatively expensive (especially for large -grammars), the resulting parsing table is written to -a file called parsetab.py. In addition, a -debugging file called parser.out is created. On subsequent -executions, yacc will reload the table from -parsetab.py unless it has detected a change in the underlying -grammar (in which case the tables and parsetab.py file are -regenerated). Both of these files are written to the same directory -as the module in which the parser is specified. -The name of the parsetab module can be changed using the -tabmodule keyword argument to yacc(). For example: -

    -
    -parser = yacc.yacc(tabmodule='fooparsetab')
    -
    -

    -If any errors are detected in your grammar specification, yacc.py will produce -diagnostic messages and possibly raise an exception. Some of the errors that can be detected include: - -

      -
    • Duplicated function names (if more than one rule function has the same name in the grammar file).
-
    • Shift/reduce and reduce/reduce conflicts generated by ambiguous grammars. -
    • Badly specified grammar rules. -
    • Infinite recursion (rules that can never terminate). -
    • Unused rules and tokens -
    • Undefined rules and tokens -
    - -The next few sections discuss grammar specification in more detail. - -

    -The final part of the example shows how to actually run the parser
-created by
-yacc(). To run the parser, you simply have to call
-parse() with a string of input text. This will run all
-of the grammar rules and return the result of the entire parse. The
-result returned is the value assigned to p[0] in the starting
-grammar rule.
-

    6.2 Combining Grammar Rule Functions

    -
    -When grammar rules are similar, they can be combined into a single function.
-For example, consider the two rules in our earlier example:
-
    -
    -def p_expression_plus(p):
    -    'expression : expression PLUS term'
    -    p[0] = p[1] + p[3]
    -
    -def p_expression_minus(p):
    -    'expression : expression MINUS term'
    -    p[0] = p[1] - p[3]
    -
    -
    - -Instead of writing two functions, you might write a single function like this: - -
    -
    -def p_expression(p):
    -    '''expression : expression PLUS term
    -                  | expression MINUS term'''
    -    if p[2] == '+':
    -        p[0] = p[1] + p[3]
    -    elif p[2] == '-':
    -        p[0] = p[1] - p[3]
    -
    -
    - -In general, the doc string for any given function can contain multiple grammar rules. So, it would -have also been legal (although possibly confusing) to write this: - -
    -
    -def p_binary_operators(p):
    -    '''expression : expression PLUS term
    -                  | expression MINUS term
    -       term       : term TIMES factor
    -                  | term DIVIDE factor'''
    -    if p[2] == '+':
    -        p[0] = p[1] + p[3]
    -    elif p[2] == '-':
    -        p[0] = p[1] - p[3]
    -    elif p[2] == '*':
    -        p[0] = p[1] * p[3]
    -    elif p[2] == '/':
    -        p[0] = p[1] / p[3]
    -
    -
    - -When combining grammar rules into a single function, it is usually a good idea for all of the rules to have -a similar structure (e.g., the same number of terms). Otherwise, the corresponding action code may be more -complicated than necessary. However, it is possible to handle simple cases using len(). For example: - -
    -
    -def p_expressions(p):
    -    '''expression : expression MINUS expression
    -                  | MINUS expression'''
    -    if (len(p) == 4):
    -        p[0] = p[1] - p[3]
    -    elif (len(p) == 3):
    -        p[0] = -p[2]
    -
    -
    - -If parsing performance is a concern, you should resist the urge to put -too much conditional processing into a single grammar rule as shown in -these examples. When you add checks to see which grammar rule is -being handled, you are actually duplicating the work that the parser -has already performed (i.e., the parser already knows exactly what rule it -matched). You can eliminate this overhead by using a -separate p_rule() function for each grammar rule. - -

    6.3 Character Literals

    -
    -If desired, a grammar may contain tokens defined as single character literals. For example:
-
    -
    -def p_binary_operators(p):
    -    '''expression : expression '+' term
    -                  | expression '-' term
    -       term       : term '*' factor
    -                  | term '/' factor'''
    -    if p[2] == '+':
    -        p[0] = p[1] + p[3]
    -    elif p[2] == '-':
    -        p[0] = p[1] - p[3]
    -    elif p[2] == '*':
    -        p[0] = p[1] * p[3]
    -    elif p[2] == '/':
    -        p[0] = p[1] / p[3]
    -
    -
    - -A character literal must be enclosed in quotes such as '+'. In addition, if literals are used, they must be declared in the -corresponding lex file through the use of a special literals declaration. - -
    -
    -# Literals.  Should be placed in module given to lex()
    -literals = ['+','-','*','/' ]
    -
    -
    - -Character literals are limited to a single character. Thus, it is not legal to specify literals such as '<=' or '=='. For this, use -the normal lexing rules (e.g., define a rule such as t_EQ = r'=='). - -

    6.4 Empty Productions

    -
    -yacc.py can handle empty productions by defining a rule like this:
-
    -
    -def p_empty(p):
    -    'empty :'
    -    pass
    -
    -
    - -Now to use the empty production, simply use 'empty' as a symbol. For example: - -
    -
    -def p_optitem(p):
    -    '''optitem : item
-               | empty'''
    -    ...
    -
    -
    - -Note: You can write empty rules anywhere by simply specifying an empty -right hand side. However, I personally find that writing an "empty" -rule and using "empty" to denote an empty production is easier to read -and more clearly states your intentions. - -

    6.5 Changing the starting symbol

    -
    -Normally, the first rule found in a yacc specification defines the starting grammar rule (top level rule). To change this, simply
-supply a start specifier in your file. For example:
-
    -
    -start = 'foo'
    -
    -def p_bar(p):
    -    'bar : A B'
    -
    -# This is the starting rule due to the start specifier above
    -def p_foo(p):
    -    'foo : bar X'
    -...
    -
    -
    - -The use of a start specifier may be useful during debugging -since you can use it to have yacc build a subset of a larger grammar. -For this purpose, it is also possible to specify a starting symbol as -an argument to yacc(). For example: - -
    -
    -parser = yacc.yacc(start='foo')
    -
    -

    6.6 Dealing With Ambiguous Grammars

    -
    -The expression grammar given in the earlier example has been written
-in a special format to eliminate ambiguity. However, in many
-situations, it is extremely difficult or awkward to write grammars in
-this format. A much more natural way to express the grammar is in a
-more compact form like this:
-
    -
    -expression : expression PLUS expression
    -           | expression MINUS expression
    -           | expression TIMES expression
    -           | expression DIVIDE expression
    -           | LPAREN expression RPAREN
    -           | NUMBER
    -
    -
    -
    -Unfortunately, this grammar specification is ambiguous. For example,
-if you are parsing the string "3 * 4 + 5", there is no way to tell how
-the operators are supposed to be grouped. Does the
-expression mean "(3 * 4) + 5" or is it "3 * (4 + 5)"?
-

    -When an ambiguous grammar is given to yacc.py it will print -messages about "shift/reduce conflicts" or "reduce/reduce conflicts". -A shift/reduce conflict is caused when the parser generator can't -decide whether or not to reduce a rule or shift a symbol on the -parsing stack. For example, consider the string "3 * 4 + 5" and the -internal parsing stack: - -

    -
    -Step Symbol Stack           Input Tokens            Action
    ----- ---------------------  ---------------------   -------------------------------
    -1    $                                3 * 4 + 5$    Shift 3
    -2    $ 3                                * 4 + 5$    Reduce expression : NUMBER
    -3    $ expr                             * 4 + 5$    Shift *
    -4    $ expr *                             4 + 5$    Shift 4
    -5    $ expr * 4                             + 5$    Reduce expression : NUMBER
    -6    $ expr * expr                          + 5$    SHIFT/REDUCE CONFLICT ????
    -
    -
    -
    -In this case, when the parser reaches step 6, it has two options. One
-is to reduce the rule expr : expr * expr on the stack. The
-other option is to shift the token + on the stack. Both
-options are perfectly legal from the rules of the
-context-free grammar.
-

    -By default, all shift/reduce conflicts are resolved in favor of -shifting. Therefore, in the above example, the parser will always -shift the + instead of reducing. Although this strategy -works in many cases (for example, the case of -"if-then" versus "if-then-else"), it is not enough for arithmetic expressions. In fact, -in the above example, the decision to shift + is completely -wrong---we should have reduced expr * expr since -multiplication has higher mathematical precedence than addition. - -

    -To resolve ambiguity, especially in expression
-grammars, yacc.py allows individual tokens to be assigned a
-precedence level and associativity. This is done by adding a variable
-precedence to the grammar file like this:
-

    -
    -precedence = (
    -    ('left', 'PLUS', 'MINUS'),
    -    ('left', 'TIMES', 'DIVIDE'),
    -)
    -
    -
    - -This declaration specifies that PLUS/MINUS have the -same precedence level and are left-associative and that -TIMES/DIVIDE have the same precedence and are -left-associative. Within the precedence declaration, tokens -are ordered from lowest to highest precedence. Thus, this declaration -specifies that TIMES/DIVIDE have higher precedence -than PLUS/MINUS (since they appear later in the -precedence specification). - -

    -The precedence specification works by associating a numerical -precedence level value and associativity direction to the listed -tokens. For example, in the above example you get: - -

    -
    -PLUS      : level = 1,  assoc = 'left'
    -MINUS     : level = 1,  assoc = 'left'
    -TIMES     : level = 2,  assoc = 'left'
    -DIVIDE    : level = 2,  assoc = 'left'
    -
    -
    - -These values are then used to attach a numerical precedence value and -associativity direction to each grammar rule. This is always -determined by looking at the precedence of the right-most terminal -symbol. For example: - -
    -
    -expression : expression PLUS expression                 # level = 1, left
    -           | expression MINUS expression                # level = 1, left
    -           | expression TIMES expression                # level = 2, left
    -           | expression DIVIDE expression               # level = 2, left
    -           | LPAREN expression RPAREN                   # level = None (not specified)
    -           | NUMBER                                     # level = None (not specified)
    -
    -
-When shift/reduce conflicts are encountered, the parser generator resolves the conflict by looking at the precedence rules and associativity specifiers:

    -

1. If the current token has higher precedence than the rule on the stack, it is shifted.
2. If the grammar rule on the stack has higher precedence, the rule is reduced.
3. If the current token and the grammar rule have the same precedence, the rule is reduced for left associativity, whereas the token is shifted for right associativity.
4. If nothing is known about the precedence, shift/reduce conflicts are resolved in favor of shifting (the default).
-
-For example, if "expression PLUS expression" has been parsed and the next token is "TIMES", the action is going to be a shift because "TIMES" has a higher precedence level than "PLUS". On the other hand, if "expression TIMES expression" has been parsed and the next token is "PLUS", the action is going to be a reduce because "PLUS" has a lower precedence than "TIMES".

-When shift/reduce conflicts are resolved using the first three techniques (with the help of precedence rules), yacc.py will report no errors or conflicts in the grammar (although it will print some information in the parser.out debugging file).

-One problem with the precedence specifier technique is that it is sometimes necessary to change the precedence of an operator in certain contexts. For example, consider a unary-minus operator in "3 + 4 * -5". Mathematically, the unary minus is normally given a very high precedence, being evaluated before the multiply. However, in our precedence specifier, MINUS has a lower precedence than TIMES. To deal with this, precedence rules can be given for so-called "fictitious tokens" like this:

    -
    -precedence = (
    -    ('left', 'PLUS', 'MINUS'),
    -    ('left', 'TIMES', 'DIVIDE'),
    -    ('right', 'UMINUS'),            # Unary minus operator
    -)
    -
    -
-Now, in the grammar file, we can write our unary minus rule like this:
    -
    -def p_expr_uminus(p):
    -    'expression : MINUS expression %prec UMINUS'
    -    p[0] = -p[2]
    -
    -
-In this case, %prec UMINUS overrides the default rule precedence, setting it to that of UMINUS in the precedence specifier.

-At first, the use of UMINUS in this example may appear very confusing. UMINUS is not an input token or a grammar rule. Instead, you should think of it as the name of a special marker in the precedence table. When you use the %prec qualifier, you're simply telling yacc that you want the precedence of the expression to be the same as for this special marker instead of the usual precedence.

-It is also possible to specify non-associativity in the precedence table. This would be used when you don't want operations to chain together. For example, suppose you wanted to support comparison operators like < and > but you didn't want to allow combinations like a < b < c. To do this, simply specify a rule like this:

    -
    -precedence = (
    -    ('nonassoc', 'LESSTHAN', 'GREATERTHAN'),  # Nonassociative operators
    -    ('left', 'PLUS', 'MINUS'),
    -    ('left', 'TIMES', 'DIVIDE'),
    -    ('right', 'UMINUS'),            # Unary minus operator
    -)
    -
    -
-

-If you do this, the occurrence of input text such as a < b < c will result in a syntax error. However, simple expressions such as a < b will still be fine.
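-The manual doesn't show the comparison rule itself; a minimal sketch of what it might look like (the rule name and the tuple layout here are assumptions) is:
-
-def p_expression_compare(p):
-    '''expression : expression LESSTHAN expression
-                  | expression GREATERTHAN expression'''
-    # Because LESSTHAN/GREATERTHAN are declared 'nonassoc', chained input
-    # such as a < b < c produces a syntax error instead of silently grouping.
-    p[0] = ('compare', p[2], p[1], p[3])
-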

-Reduce/reduce conflicts are caused when there are multiple grammar rules that can be applied to a given set of symbols. This kind of conflict is almost always bad and is always resolved by picking the rule that appears first in the grammar file. Reduce/reduce conflicts are almost always caused when different sets of grammar rules somehow generate the same set of symbols. For example:

    -
    -assignment :  ID EQUALS NUMBER
    -           |  ID EQUALS expression
    -           
    -expression : expression PLUS expression
    -           | expression MINUS expression
    -           | expression TIMES expression
    -           | expression DIVIDE expression
    -           | LPAREN expression RPAREN
    -           | NUMBER
    -
    -
-In this case, a reduce/reduce conflict exists between these two rules:
    -
    -assignment  : ID EQUALS NUMBER
    -expression  : NUMBER
    -
    -
-For example, if you wrote "a = 5", the parser can't figure out if this is supposed to be reduced as assignment : ID EQUALS NUMBER or whether it's supposed to reduce the 5 as an expression and then reduce the rule assignment : ID EQUALS expression.
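-One common fix, sketched here as a suggestion rather than taken from the manual, is to delete the specialized rule and let the general one do the work, since expression already derives NUMBER:
-
-assignment : ID EQUALS expression        # the NUMBER case is reached via expression : NUMBER
-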

-It should be noted that reduce/reduce conflicts are notoriously difficult to spot simply by looking at the input grammar. When a reduce/reduce conflict occurs, yacc() will try to help by printing a warning message such as this:

    -
    -WARNING: 1 reduce/reduce conflict
    -WARNING: reduce/reduce conflict in state 15 resolved using rule (assignment -> ID EQUALS NUMBER)
    -WARNING: rejected rule (expression -> NUMBER)
    -
    -
-This message identifies the two rules that are in conflict. However, it may not tell you how the parser arrived at such a state. To try and figure it out, you'll probably have to look at your grammar and the contents of the parser.out debugging file with an appropriately high level of caffeination.

    6.7 The parser.out file

-Tracking down shift/reduce and reduce/reduce conflicts is one of the finer pleasures of using an LR parsing algorithm. To assist in debugging, yacc.py creates a debugging file called 'parser.out' when it generates the parsing table. The contents of this file look like the following:
    -
    -Unused terminals:
    -
    -
    -Grammar
    -
    -Rule 1     expression -> expression PLUS expression
    -Rule 2     expression -> expression MINUS expression
    -Rule 3     expression -> expression TIMES expression
    -Rule 4     expression -> expression DIVIDE expression
    -Rule 5     expression -> NUMBER
    -Rule 6     expression -> LPAREN expression RPAREN
    -
    -Terminals, with rules where they appear
    -
    -TIMES                : 3
    -error                : 
    -MINUS                : 2
    -RPAREN               : 6
    -LPAREN               : 6
    -DIVIDE               : 4
    -PLUS                 : 1
    -NUMBER               : 5
    -
    -Nonterminals, with rules where they appear
    -
    -expression           : 1 1 2 2 3 3 4 4 6 0
    -
    -
    -Parsing method: LALR
    -
    -
    -state 0
    -
    -    S' -> . expression
    -    expression -> . expression PLUS expression
    -    expression -> . expression MINUS expression
    -    expression -> . expression TIMES expression
    -    expression -> . expression DIVIDE expression
    -    expression -> . NUMBER
    -    expression -> . LPAREN expression RPAREN
    -
    -    NUMBER          shift and go to state 3
    -    LPAREN          shift and go to state 2
    -
    -
    -state 1
    -
    -    S' -> expression .
    -    expression -> expression . PLUS expression
    -    expression -> expression . MINUS expression
    -    expression -> expression . TIMES expression
    -    expression -> expression . DIVIDE expression
    -
    -    PLUS            shift and go to state 6
    -    MINUS           shift and go to state 5
    -    TIMES           shift and go to state 4
    -    DIVIDE          shift and go to state 7
    -
    -
    -state 2
    -
    -    expression -> LPAREN . expression RPAREN
    -    expression -> . expression PLUS expression
    -    expression -> . expression MINUS expression
    -    expression -> . expression TIMES expression
    -    expression -> . expression DIVIDE expression
    -    expression -> . NUMBER
    -    expression -> . LPAREN expression RPAREN
    -
    -    NUMBER          shift and go to state 3
    -    LPAREN          shift and go to state 2
    -
    -
    -state 3
    -
    -    expression -> NUMBER .
    -
    -    $               reduce using rule 5
    -    PLUS            reduce using rule 5
    -    MINUS           reduce using rule 5
    -    TIMES           reduce using rule 5
    -    DIVIDE          reduce using rule 5
    -    RPAREN          reduce using rule 5
    -
    -
    -state 4
    -
    -    expression -> expression TIMES . expression
    -    expression -> . expression PLUS expression
    -    expression -> . expression MINUS expression
    -    expression -> . expression TIMES expression
    -    expression -> . expression DIVIDE expression
    -    expression -> . NUMBER
    -    expression -> . LPAREN expression RPAREN
    -
    -    NUMBER          shift and go to state 3
    -    LPAREN          shift and go to state 2
    -
    -
    -state 5
    -
    -    expression -> expression MINUS . expression
    -    expression -> . expression PLUS expression
    -    expression -> . expression MINUS expression
    -    expression -> . expression TIMES expression
    -    expression -> . expression DIVIDE expression
    -    expression -> . NUMBER
    -    expression -> . LPAREN expression RPAREN
    -
    -    NUMBER          shift and go to state 3
    -    LPAREN          shift and go to state 2
    -
    -
    -state 6
    -
    -    expression -> expression PLUS . expression
    -    expression -> . expression PLUS expression
    -    expression -> . expression MINUS expression
    -    expression -> . expression TIMES expression
    -    expression -> . expression DIVIDE expression
    -    expression -> . NUMBER
    -    expression -> . LPAREN expression RPAREN
    -
    -    NUMBER          shift and go to state 3
    -    LPAREN          shift and go to state 2
    -
    -
    -state 7
    -
    -    expression -> expression DIVIDE . expression
    -    expression -> . expression PLUS expression
    -    expression -> . expression MINUS expression
    -    expression -> . expression TIMES expression
    -    expression -> . expression DIVIDE expression
    -    expression -> . NUMBER
    -    expression -> . LPAREN expression RPAREN
    -
    -    NUMBER          shift and go to state 3
    -    LPAREN          shift and go to state 2
    -
    -
    -state 8
    -
    -    expression -> LPAREN expression . RPAREN
    -    expression -> expression . PLUS expression
    -    expression -> expression . MINUS expression
    -    expression -> expression . TIMES expression
    -    expression -> expression . DIVIDE expression
    -
    -    RPAREN          shift and go to state 13
    -    PLUS            shift and go to state 6
    -    MINUS           shift and go to state 5
    -    TIMES           shift and go to state 4
    -    DIVIDE          shift and go to state 7
    -
    -
    -state 9
    -
    -    expression -> expression TIMES expression .
    -    expression -> expression . PLUS expression
    -    expression -> expression . MINUS expression
    -    expression -> expression . TIMES expression
    -    expression -> expression . DIVIDE expression
    -
    -    $               reduce using rule 3
    -    PLUS            reduce using rule 3
    -    MINUS           reduce using rule 3
    -    TIMES           reduce using rule 3
    -    DIVIDE          reduce using rule 3
    -    RPAREN          reduce using rule 3
    -
    -  ! PLUS            [ shift and go to state 6 ]
    -  ! MINUS           [ shift and go to state 5 ]
    -  ! TIMES           [ shift and go to state 4 ]
    -  ! DIVIDE          [ shift and go to state 7 ]
    -
    -state 10
    -
    -    expression -> expression MINUS expression .
    -    expression -> expression . PLUS expression
    -    expression -> expression . MINUS expression
    -    expression -> expression . TIMES expression
    -    expression -> expression . DIVIDE expression
    -
    -    $               reduce using rule 2
    -    PLUS            reduce using rule 2
    -    MINUS           reduce using rule 2
    -    RPAREN          reduce using rule 2
    -    TIMES           shift and go to state 4
    -    DIVIDE          shift and go to state 7
    -
    -  ! TIMES           [ reduce using rule 2 ]
    -  ! DIVIDE          [ reduce using rule 2 ]
    -  ! PLUS            [ shift and go to state 6 ]
    -  ! MINUS           [ shift and go to state 5 ]
    -
    -state 11
    -
    -    expression -> expression PLUS expression .
    -    expression -> expression . PLUS expression
    -    expression -> expression . MINUS expression
    -    expression -> expression . TIMES expression
    -    expression -> expression . DIVIDE expression
    -
    -    $               reduce using rule 1
    -    PLUS            reduce using rule 1
    -    MINUS           reduce using rule 1
    -    RPAREN          reduce using rule 1
    -    TIMES           shift and go to state 4
    -    DIVIDE          shift and go to state 7
    -
    -  ! TIMES           [ reduce using rule 1 ]
    -  ! DIVIDE          [ reduce using rule 1 ]
    -  ! PLUS            [ shift and go to state 6 ]
    -  ! MINUS           [ shift and go to state 5 ]
    -
    -state 12
    -
    -    expression -> expression DIVIDE expression .
    -    expression -> expression . PLUS expression
    -    expression -> expression . MINUS expression
    -    expression -> expression . TIMES expression
    -    expression -> expression . DIVIDE expression
    -
    -    $               reduce using rule 4
    -    PLUS            reduce using rule 4
    -    MINUS           reduce using rule 4
    -    TIMES           reduce using rule 4
    -    DIVIDE          reduce using rule 4
    -    RPAREN          reduce using rule 4
    -
    -  ! PLUS            [ shift and go to state 6 ]
    -  ! MINUS           [ shift and go to state 5 ]
    -  ! TIMES           [ shift and go to state 4 ]
    -  ! DIVIDE          [ shift and go to state 7 ]
    -
    -state 13
    -
    -    expression -> LPAREN expression RPAREN .
    -
    -    $               reduce using rule 6
    -    PLUS            reduce using rule 6
    -    MINUS           reduce using rule 6
    -    TIMES           reduce using rule 6
    -    DIVIDE          reduce using rule 6
    -    RPAREN          reduce using rule 6
    -
    -
-The different states that appear in this file are a representation of every possible sequence of valid input tokens allowed by the grammar. When receiving input tokens, the parser is building up a stack and looking for matching rules. Each state keeps track of the grammar rules that might be in the process of being matched at that point. Within each rule, the "." character indicates the current location of the parse within that rule. In addition, the actions for each valid input token are listed. When a shift/reduce or reduce/reduce conflict arises, rules not selected are prefixed with an !. For example:
    -
    -  ! TIMES           [ reduce using rule 2 ]
    -  ! DIVIDE          [ reduce using rule 2 ]
    -  ! PLUS            [ shift and go to state 6 ]
    -  ! MINUS           [ shift and go to state 5 ]
    -
    -
-By looking at these rules (and with a little practice), you can usually track down the source of most parsing conflicts. It should also be stressed that not all shift-reduce conflicts are bad. However, the only way to be sure that they are resolved correctly is to look at parser.out.

    6.8 Syntax Error Handling

-If you are creating a parser for production use, the handling of syntax errors is important. As a general rule, you don't want a parser to simply throw up its hands and stop at the first sign of trouble. Instead, you want it to report the error, recover if possible, and continue parsing so that all of the errors in the input get reported to the user at once. This is the standard behavior found in compilers for languages such as C, C++, and Java.
-
-In PLY, when a syntax error occurs during parsing, the error is immediately detected (i.e., the parser does not read any more tokens beyond the source of the error). However, at this point, the parser enters a recovery mode that can be used to try and continue further parsing. As a general rule, error recovery in LR parsers is a delicate topic that involves ancient rituals and black magic. The recovery mechanism provided by yacc.py is comparable to Unix yacc, so you may want to consult a book like O'Reilly's "Lex and Yacc" for some of the finer details.

-When a syntax error occurs, yacc.py performs the following steps:

1. On the first occurrence of an error, the user-defined p_error() function is called with the offending token as an argument. However, if the syntax error is due to reaching the end-of-file, p_error() is called with an argument of None. Afterwards, the parser enters an "error-recovery" mode in which it will not make future calls to p_error() until it has successfully shifted at least 3 tokens onto the parsing stack.

2. If no recovery action is taken in p_error(), the offending lookahead token is replaced with a special error token.

3. If the offending lookahead token is already set to error, the top item of the parsing stack is deleted.

4. If the entire parsing stack is unwound, the parser enters a restart state and attempts to start parsing from its initial state.

5. If a grammar rule accepts error as a token, it will be shifted onto the parsing stack.

6. If the top item of the parsing stack is error, lookahead tokens will be discarded until the parser can successfully shift a new symbol or reduce a rule involving error.

    6.8.1 Recovery and resynchronization with error rules

-The most well-behaved approach for handling syntax errors is to write grammar rules that include the error token. For example, suppose your language had a grammar rule for a print statement like this:
    -
    -def p_statement_print(p):
    -     'statement : PRINT expr SEMI'
    -     ...
    -
    -
-To account for the possibility of a bad expression, you might write an additional grammar rule like this:
    -
    -def p_statement_print_error(p):
    -     'statement : PRINT error SEMI'
    -     print("Syntax error in print statement. Bad expression")
    -
    -
    -
-In this case, the error token will match any sequence of tokens that might appear up to the first semicolon that is encountered. Once the semicolon is reached, the rule will be invoked and the error token will go away.

-This type of recovery is sometimes known as parser resynchronization. The error token acts as a wildcard for any bad input text, and the token immediately following error acts as a synchronization token.
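-Seen side by side with the normal rule, the wildcard/synchronization pairing looks like this (the p[0] assignments are illustrative assumptions, not part of the manual's example):
-
-def p_statement_print(p):
-    'statement : PRINT expr SEMI'
-    p[0] = ('print', p[2])
-
-def p_statement_print_error(p):
-    'statement : PRINT error SEMI'
-    # error matched everything between PRINT and the next SEMI;
-    # SEMI is the synchronization token where normal parsing resumes.
-    print("Syntax error in print statement. Bad expression")
-    p[0] = ('print', None)
-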

-It is important to note that the error token usually does not appear as the last token on the right in an error rule. For example:

    -
    -def p_statement_print_error(p):
    -    'statement : PRINT error'
    -    print("Syntax error in print statement. Bad expression")
    -
    -
-This is because the first bad token encountered will cause the rule to be reduced, which may make it difficult to recover if more bad tokens immediately follow.

    6.8.2 Panic mode recovery

-An alternative error recovery scheme is to enter a panic mode recovery in which tokens are discarded to a point where the parser might be able to recover in some sensible manner.

-Panic mode recovery is implemented entirely in the p_error() function. For example, this function starts discarding tokens until it reaches a closing '}'. Then, it restarts the parser in its initial state:

    -
    -def p_error(p):
    -    print("Whoa. You are seriously hosed.")
    -    if not p:
    -        print("End of File!")
    -        return
    -
    -    # Read ahead looking for a closing '}'
    -    while True:
    -        tok = parser.token()             # Get the next token
    -        if not tok or tok.type == 'RBRACE': 
    -            break
    -    parser.restart()
    -
    -
-

-This function simply discards the bad token and tells the parser that the error was ok:

    -
    -def p_error(p):
    -    if p:
    -         print("Syntax error at token", p.type)
    -         # Just discard the token and tell the parser it's okay.
    -         parser.errok()
    -    else:
    -         print("Syntax error at EOF")
    -
    -
-

-More information on these methods is as follows:
-
• parser.errok(). This resets the parser state so it doesn't think it's in error-recovery mode. This will prevent an error token from being generated and will reset the internal error counters so that the next syntax error will call p_error() again.

• parser.token(). This returns the next token on the input stream.

• parser.restart(). This discards the entire parsing stack and resets the parser to its initial state.

-To supply the next lookahead token to the parser, p_error() can return a token. This might be useful if trying to synchronize on special characters. For example:

    -
    -def p_error(p):
    -    # Read ahead looking for a terminating ";"
    -    while True:
    -        tok = parser.token()             # Get the next token
    -        if not tok or tok.type == 'SEMI': break
    -    parser.errok()
    -
    -    # Return SEMI to the parser as the next lookahead token
    -    return tok  
    -
    -
-

-Keep in mind that in the above error handling functions, parser is an instance of the parser created by yacc(). You'll need to save this instance someplace in your code so that you can refer to it during error handling.

    6.8.3 Signalling an error from a production

-If necessary, a production rule can manually force the parser to enter error recovery. This is done by raising the SyntaxError exception like this:
    -
    -def p_production(p):
    -    'production : some production ...'
    -    raise SyntaxError
    -
    -
-The effect of raising SyntaxError is the same as if the last symbol shifted onto the parsing stack was actually a syntax error. Thus, when you do this, the last symbol shifted is popped off of the parsing stack and the current lookahead token is set to an error token. The parser then enters error-recovery mode where it tries to reduce rules that can accept error tokens. The steps that follow from this point are exactly the same as if a syntax error were detected and p_error() were called.

-One important aspect of manually setting an error is that the p_error() function will NOT be called in this case. If you need to issue an error message, make sure you do it in the production that raises SyntaxError.

-Note: This feature of PLY is meant to mimic the behavior of the YYERROR macro in yacc.
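-For example, a production might validate a value and signal the error itself; this sketch (the rule and the reserved_names set are hypothetical) prints its own message precisely because p_error() is skipped:
-
-reserved_names = {'if', 'while'}                # hypothetical set of forbidden names
-
-def p_definition(p):
-    'definition : ID EQUALS expression'
-    if p[1] in reserved_names:
-        print("%s is a reserved name" % p[1])   # report here; p_error() will not run
-        raise SyntaxError                       # force error recovery
-    p[0] = ('define', p[1], p[3])
-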

    6.8.4 When Do Syntax Errors Get Reported

-

-In most cases, yacc will handle errors as soon as a bad input token is detected on the input. However, be aware that yacc may choose to delay error handling until after it has reduced one or more grammar rules first. This behavior might be unexpected, but it's related to special states in the underlying parsing table known as "defaulted states." A defaulted state is a parsing condition where the same grammar rule will be reduced regardless of what valid token comes next on the input. For such states, yacc chooses to go ahead and reduce the grammar rule without reading the next input token. If the next token is bad, yacc will eventually get around to reading it and report a syntax error. It's just a little unusual in that you might see some of your grammar rules firing immediately prior to the syntax error.

-Usually, the delayed error reporting with defaulted states is harmless (and there are other reasons for wanting PLY to behave in this way). However, if you need to turn this behavior off for some reason, you can clear the defaulted states table like this:
    -
    -parser = yacc.yacc()
    -parser.defaulted_states = {}
    -
    -
    - -

-Disabling defaulted states is not recommended if your grammar makes use of embedded actions as described in Section 6.11.

    6.8.5 General comments on error handling

-For normal types of languages, error recovery with error rules and resynchronization characters is probably the most reliable technique. This is because you can instrument the grammar to catch errors at selected places where it is relatively easy to recover and continue parsing. Panic mode recovery is really only useful in certain specialized applications where you might want to discard huge portions of the input text to find a valid restart point.

    6.9 Line Number and Position Tracking

-Position tracking is often a tricky problem when writing compilers. By default, PLY tracks the line number and position of all tokens. This information is available using the following functions:
-
• p.lineno(num). Return the line number for symbol num.
• p.lexpos(num). Return the lexing position for symbol num.
-
-For example:
    -
    -def p_expression(p):
    -    'expression : expression PLUS expression'
-    line   = p.lineno(2)        # Line number of the PLUS token
    -    index  = p.lexpos(2)        # Position of the PLUS token
    -
    -
-As an optional feature, yacc.py can automatically track line numbers and positions for all of the grammar symbols as well. However, this extra tracking requires extra processing and can significantly slow down parsing. Therefore, it must be enabled by passing the tracking=True option to yacc.parse(). For example:
    -
    -yacc.parse(data,tracking=True)
    -
    -
-Once enabled, the lineno() and lexpos() methods work for all grammar symbols. In addition, two other methods can be used:
-
• p.linespan(num). Return a tuple (startline,endline) with the starting and ending line number for symbol num.
• p.lexspan(num). Return a tuple (start,end) with the starting and ending positions for symbol num.
-
-For example:
    -
    -def p_expression(p):
    -    'expression : expression PLUS expression'
    -    p.lineno(1)        # Line number of the left expression
-    p.lineno(2)        # Line number of the PLUS operator
-    p.lineno(3)        # Line number of the right expression
    -    ...
    -    start,end = p.linespan(3)    # Start,end lines of the right expression
    -    starti,endi = p.lexspan(3)   # Start,end positions of right expression
    -
    -
    -
-Note: The lexspan() function only returns the range of values up to the start of the last grammar symbol.

-Although it may be convenient for PLY to track position information on all grammar symbols, this is often unnecessary. For example, if you are merely using line number information in an error message, you can often just key off of a specific token in the grammar rule:

    -
    -def p_bad_func(p):
    -    'funccall : fname LPAREN error RPAREN'
    -    # Line number reported from LPAREN token
    -    print("Bad function call at line", p.lineno(2))
    -
    -
-

-Similarly, you may get better parsing performance if you only selectively propagate line number information where it's needed using the p.set_lineno() method. For example:

    -
    -def p_fname(p):
    -    'fname : ID'
    -    p[0] = p[1]
    -    p.set_lineno(0,p.lineno(1))
    -
    -
-PLY doesn't retain line number information from rules that have already been parsed. If you are building an abstract syntax tree and need to have line numbers, you should make sure that the line numbers appear in the tree itself.
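-One way to do that, sketched here as an illustration (the tuple layout is an assumption), is to copy the operator token's line number into the node when the rule fires:
-
-def p_expression_binop(p):
-    'expression : expression PLUS expression'
-    # record the PLUS token's line number inside the AST node itself
-    p[0] = ('binop', p[2], p[1], p[3], p.lineno(2))
-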

    6.10 AST Construction

-yacc.py provides no special functions for constructing an abstract syntax tree. However, such construction is easy enough to do on your own.

-A minimal way to construct a tree is to simply create and propagate a tuple or list in each grammar rule function. There are many possible ways to do this, but one example would be something like this:

    -
    -def p_expression_binop(p):
    -    '''expression : expression PLUS expression
    -                  | expression MINUS expression
    -                  | expression TIMES expression
    -                  | expression DIVIDE expression'''
    -
    -    p[0] = ('binary-expression',p[2],p[1],p[3])
    -
    -def p_expression_group(p):
    -    'expression : LPAREN expression RPAREN'
    -    p[0] = ('group-expression',p[2])
    -
    -def p_expression_number(p):
    -    'expression : NUMBER'
    -    p[0] = ('number-expression',p[1])
    -
    -
-

-Another approach is to create a set of data structures for the different kinds of abstract syntax tree nodes and assign nodes to p[0] in each rule. For example:

    -
    -class Expr: pass
    -
    -class BinOp(Expr):
    -    def __init__(self,left,op,right):
    -        self.type = "binop"
    -        self.left = left
    -        self.right = right
    -        self.op = op
    -
    -class Number(Expr):
    -    def __init__(self,value):
    -        self.type = "number"
    -        self.value = value
    -
    -def p_expression_binop(p):
    -    '''expression : expression PLUS expression
    -                  | expression MINUS expression
    -                  | expression TIMES expression
    -                  | expression DIVIDE expression'''
    -
    -    p[0] = BinOp(p[1],p[2],p[3])
    -
    -def p_expression_group(p):
    -    'expression : LPAREN expression RPAREN'
    -    p[0] = p[2]
    -
    -def p_expression_number(p):
    -    'expression : NUMBER'
    -    p[0] = Number(p[1])
    -
    -
-The advantage to this approach is that it may make it easier to attach more complicated semantics, type checking, code generation, and other features to the node classes.
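-For instance, a later pass can dispatch on the node classes. The following evaluator is a minimal sketch (not from the manual) written against the BinOp and Number classes above:
-
-def evaluate(node):
-    # post-order walk: evaluate children first, then apply the operator
-    if isinstance(node, Number):
-        return node.value
-    left = evaluate(node.left)
-    right = evaluate(node.right)
-    if node.op == '+': return left + right
-    if node.op == '-': return left - right
-    if node.op == '*': return left * right
-    if node.op == '/': return left / right
-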

-To simplify tree traversal, it may make sense to pick a very generic tree structure for your parse tree nodes. For example:

    -
    -class Node:
    -    def __init__(self,type,children=None,leaf=None):
    -         self.type = type
    -         if children:
    -              self.children = children
    -         else:
    -              self.children = [ ]
    -         self.leaf = leaf
    -	 
    -def p_expression_binop(p):
    -    '''expression : expression PLUS expression
    -                  | expression MINUS expression
    -                  | expression TIMES expression
    -                  | expression DIVIDE expression'''
    -
    -    p[0] = Node("binop", [p[1],p[3]], p[2])
    -
    -
-
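-A generic structure like this pairs naturally with a single traversal routine. This printer is a sketch (assumed, not part of the manual) that works for any Node tree:
-
-def print_tree(node, indent=0):
-    # preorder walk over the generic Node structure above
-    print(" " * indent + node.type, node.leaf if node.leaf is not None else "")
-    for child in node.children:
-        if isinstance(child, Node):
-            print_tree(child, indent + 2)
-        else:
-            print(" " * (indent + 2) + repr(child))   # a plain token value
-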

    6.11 Embedded Actions

-The parsing technique used by yacc only allows actions to be executed at the end of a rule. For example, suppose you have a rule like this:
    -
    -def p_foo(p):
    -    "foo : A B C D"
    -    print("Parsed a foo", p[1],p[2],p[3],p[4])
    -
    -
-

-In this case, the supplied action code only executes after all of the symbols A, B, C, and D have been parsed. Sometimes, however, it is useful to execute small code fragments during intermediate stages of parsing. For example, suppose you wanted to perform some action immediately after A has been parsed. To do this, write an empty rule like this:

    -
    -def p_foo(p):
    -    "foo : A seen_A B C D"
    -    print("Parsed a foo", p[1],p[3],p[4],p[5])
    -    print("seen_A returned", p[2])
    -
    -def p_seen_A(p):
    -    "seen_A :"
    -    print("Saw an A = ", p[-1])   # Access grammar symbol to left
    -    p[0] = some_value            # Assign value to seen_A
    -
    -
    -
-

-In this example, the empty seen_A rule executes immediately after A is shifted onto the parsing stack. Within this rule, p[-1] refers to the symbol on the stack that appears immediately to the left of the seen_A symbol. In this case, it would be the value of A in the foo rule immediately above. Like other rules, a value can be returned from an embedded action by simply assigning it to p[0].

-The use of embedded actions can sometimes introduce extra shift/reduce conflicts. For example, this grammar has no conflicts:

    -
    -def p_foo(p):
    -    """foo : abcd
    -           | abcx"""
    -
    -def p_abcd(p):
    -    "abcd : A B C D"
    -
    -def p_abcx(p):
    -    "abcx : A B C X"
    -
    -
-However, if you insert an embedded action into one of the rules like this,
    -
    -def p_foo(p):
    -    """foo : abcd
    -           | abcx"""
    -
    -def p_abcd(p):
    -    "abcd : A B C D"
    -
    -def p_abcx(p):
    -    "abcx : A B seen_AB C X"
    -
    -def p_seen_AB(p):
    -    "seen_AB :"
    -
    -
-an extra shift-reduce conflict will be introduced. This conflict is caused by the fact that the same symbol C appears next in both the abcd and abcx rules. The parser can either shift the symbol (abcd rule) or reduce the empty rule seen_AB (abcx rule).

-A common use of embedded rules is to control other aspects of parsing such as scoping of local variables. For example, if you were parsing C code, you might write code like this:

    -
    -def p_statements_block(p):
    -    "statements: LBRACE new_scope statements RBRACE"""
    -    # Action code
    -    ...
    -    pop_scope()        # Return to previous scope
    -
    -def p_new_scope(p):
    -    "new_scope :"
    -    # Create a new scope for local variables
    -    s = new_scope()
    -    push_scope(s)
    -    ...
    -
    -
-In this case, the embedded action new_scope executes immediately after a LBRACE ({) symbol is parsed. This might adjust internal symbol tables and other aspects of the parser. Upon completion of the rule statements_block, code might undo the operations performed in the embedded action (e.g., pop_scope()).
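-The scope helpers themselves are left undefined above; a minimal sketch of what they might look like (a plain stack of dictionaries, purely an assumption) is:
-
-scope_stack = [{}]                 # global scope at the bottom
-
-def new_scope():
-    return {}                      # a fresh, empty symbol table
-
-def push_scope(s):
-    scope_stack.append(s)
-
-def pop_scope():
-    return scope_stack.pop()       # discard the innermost scope
-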

    6.12 Miscellaneous Yacc Notes

-
• By default, yacc.py relies on lex.py for tokenizing. However, an alternative tokenizer can be supplied as follows:
-
-result = yacc.parse(lexer=x)
-
-In this case, x must be a Lexer object that minimally has a x.token() method for retrieving the next token. If an input string is given to yacc.parse(), the lexer must also have an x.input() method.
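-A minimal sketch of such an object (the class and token shape are assumptions, not a PLY API) might look like this:
-
-class TokenStreamLexer:
-    # Feeds pre-made tokens to yacc. Each token needs .type, .value,
-    # .lineno and .lexpos attributes, like the tokens produced by lex.py.
-    def __init__(self, toks):
-        self.toks = iter(toks)
-
-    def input(self, data):
-        pass                       # only required if parse() is given a string
-
-    def token(self):
-        return next(self.toks, None)   # None signals end of input
-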

      -

• By default, yacc generates tables in debugging mode (which produces the parser.out file and other output). To disable this, use:
      -
      -parser = yacc.yacc(debug=False)
      -
      -
      - -

      -

• To change the name of the parsetab.py file, use:
      -
      -parser = yacc.yacc(tabmodule="foo")
      -
      -
      - -

-Normally, the parsetab.py file is placed into the same directory as the module where the parser is defined. If you want it to go somewhere else, you can give an absolute package name for tabmodule instead. In that case, the tables will be written there.

      - -

      -

• To change the directory in which the parsetab.py file (and other output files) are written, use:
      -
      -parser = yacc.yacc(tabmodule="foo",outputdir="somedirectory")
      -
      -
      - -

-Note: Be aware that unless the directory specified is also on Python's path (sys.path), subsequent imports of the table file will fail. As a general rule, it's better to specify a destination using the tabmodule argument instead of directly specifying a directory using the outputdir argument.

      - -

      -

• To prevent yacc from generating any kind of parser table file, use:
      -
      -parser = yacc.yacc(write_tables=False)
      -
      -
-Note: If you disable table generation, yacc() will regenerate the parsing tables each time it runs (which may take a while depending on how large your grammar is).

      -

• To print copious amounts of debugging during parsing, use:
      -
      -parser.parse(input_text, debug=True)     
      -
      -
      - -

      -

• Since the generation of the LALR tables is relatively expensive, previously generated tables are cached and reused if possible. The decision to regenerate the tables is determined by taking an MD5 checksum of all grammar rules and precedence rules. Only in the event of a mismatch are the tables regenerated.
-
-It should be noted that table generation is reasonably efficient, even for grammars that involve around 100 rules and several hundred states.

      -

• Since LR parsing is driven by tables, the performance of the parser is largely independent of the size of the grammar. The biggest bottlenecks will be the lexer and the complexity of the code in your grammar rules.

      - -

      -

• yacc() also allows parsers to be defined as classes and as closures (see the section on alternative specification of lexers). However, be aware that only one parser may be defined in a single module (source file). There are various error checks and validation steps that may issue confusing error messages if you try to define multiple parsers in the same source file.

      - -

      -

• Decorators of production rules have to update the wrapped function's line number, e.g. wrapper.co_firstlineno = func.__code__.co_firstlineno:
      -
      -from functools import wraps
      -from nodes import Collection
      -
      -
      -def strict(*types):
      -    def decorate(func):
      -        @wraps(func)
      -        def wrapper(p):
      -            func(p)
      -            if not isinstance(p[0], types):
      -                raise TypeError
      -
      -        wrapper.co_firstlineno = func.__code__.co_firstlineno
      -        return wrapper
      -
      -    return decorate
      -
      -@strict(Collection)
      -def p_collection(p):
      -    """
      -    collection  : sequence
      -                | map
      -    """
      -    p[0] = p[1]
      -
      -
      - -

    7. Multiple Parsers and Lexers

-In advanced parsing applications, you may want to have multiple parsers and lexers.

-As a general rule, this isn't a problem. However, to make it work, you need to carefully make sure everything gets hooked up correctly. First, make sure you save the objects returned by lex() and yacc(). For example:

    -
    -lexer  = lex.lex()       # Return lexer object
    -parser = yacc.yacc()     # Return parser object
    -
    -
-Next, when parsing, make sure you give the parse() function a reference to the lexer it should be using. For example:
    -
    -parser.parse(text,lexer=lexer)
    -
    -
-If you forget to do this, the parser will use the last lexer created, which is not always what you want.

-Within lexer and parser rule functions, these objects are also available. In the lexer, the "lexer" attribute of a token refers to the lexer object that triggered the rule. For example:

    -
    -def t_NUMBER(t):
    -   r'\d+'
    -   ...
    -   print(t.lexer)           # Show lexer object
    -
    -
-In the parser, the "lexer" and "parser" attributes refer to the lexer and parser objects respectively:
    -
    -def p_expr_plus(p):
    -   'expr : expr PLUS expr'
    -   ...
    -   print(p.parser)          # Show parser object
    -   print(p.lexer)           # Show lexer object
    -
    -
-If necessary, arbitrary attributes can be attached to the lexer or parser object. For example, if you wanted to have different parsing modes, you could attach a mode attribute to the parser object and look at it later.
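-A small sketch of that idea (the mode attribute and the validate() helper are assumptions for illustration):
-
-def p_statement_expr(p):
-    'statement : expression'
-    # read the attribute back from inside a grammar rule
-    if p.parser.mode == 'strict':
-        validate(p[1])             # hypothetical extra checking
-    p[0] = p[1]
-
-parser = yacc.yacc()               # build the tables after the rules are defined
-parser.mode = 'strict'             # application-defined attribute
-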

    8. Using Python's Optimized Mode

-Because PLY uses information from doc-strings, parsing and lexing information must be gathered while running the Python interpreter in normal mode (i.e., not with the -O or -OO options). However, if you specify optimized mode like this:
    -
    -lex.lex(optimize=1)
    -yacc.yacc(optimize=1)
    -
    -
-then PLY can later be used when Python runs in optimized mode. To make this work, make sure you first run Python in normal mode. Once the lexing and parsing tables have been generated the first time, run Python in optimized mode. PLY will use the tables without the need for doc strings.

-Beware: running PLY in optimized mode disables a lot of error checking. You should only do this when your project has stabilized and you don't need to do any debugging. One of the purposes of optimized mode is to substantially decrease the startup time of your compiler (by assuming that everything is already properly specified and works).

    9. Advanced Debugging

-

-Debugging a compiler is typically not an easy task. PLY provides some advanced diagnostic capabilities through the use of Python's logging module. The next two sections describe this.

    9.1 Debugging the lex() and yacc() commands

-

-Both the lex() and yacc() commands have a debugging mode that can be enabled using the debug flag. For example:

    -
    -lex.lex(debug=True)
    -yacc.yacc(debug=True)
    -
    -
-Normally, the output produced by debugging is routed to either standard error or, in the case of yacc(), to a file parser.out. This output can be more carefully controlled by supplying a logging object. Here is an example that adds information about where different debugging messages are coming from:
    -
    -# Set up a logging object
    -import logging
    -logging.basicConfig(
    -    level = logging.DEBUG,
    -    filename = "parselog.txt",
    -    filemode = "w",
    -    format = "%(filename)10s:%(lineno)4d:%(message)s"
    -)
    -log = logging.getLogger()
    -
    -lex.lex(debug=True,debuglog=log)
    -yacc.yacc(debug=True,debuglog=log)
    -
    -
-If you supply a custom logger, the amount of debugging information produced can be controlled by setting the logging level. Typically, debugging messages are either issued at the DEBUG, INFO, or WARNING levels.

-PLY's error messages and warnings are also produced using the logging interface. This can be controlled by passing a logging object using the errorlog parameter:

    -
    -lex.lex(errorlog=log)
    -yacc.yacc(errorlog=log)
    -
    -
-If you want to completely silence warnings, you can either pass in a logging object with an appropriate filter level or use the NullLogger object defined in either lex or yacc. For example:
    -
    -yacc.yacc(errorlog=yacc.NullLogger())
    -
    -
    - -

    9.2 Run-time Debugging

-

-To enable run-time debugging of a parser, use the debug option to parse(). This option can either be an integer (which simply turns debugging on or off) or an instance of a logger object. For example:

    -
    -log = logging.getLogger()
    -parser.parse(input,debug=log)
    -
    -
-If a logging object is passed, you can use its filtering level to control how much output gets generated. The INFO level is used to produce information about rule reductions. The DEBUG level will show information about the parsing stack, token shifts, and other details. The ERROR level shows information related to parsing errors.

-For very complicated problems, you should pass in a logging object that redirects to a file where you can more easily inspect the output after execution.
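-For instance, a sketch combining the pieces above (the file name is an assumption):
-
-import logging
-logging.basicConfig(level=logging.DEBUG, filename="parsedebug.txt", filemode="w")
-
-# all run-time parser debugging now lands in parsedebug.txt
-parser.parse(data, debug=logging.getLogger())
-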

    10. Packaging Advice

-

-If you are distributing a package that makes use of PLY, you should spend a few moments thinking about how you want to handle the files that are automatically generated, such as the parsetab.py file generated by the yacc() function.

    - -

-Starting in PLY-3.6, the table files are created in the same directory as the file where a parser is defined. This means that the parsetab.py file will live side-by-side with your parser specification. In terms of packaging, this is probably the easiest and most sane approach to manage. You don't need to give yacc() any extra arguments and it should just "work."

    - -

-One concern is the management of the parsetab.py file itself. For example, should you have this file checked into version control (e.g., GitHub), should it be included in a package distribution as a normal file, or should you just let PLY generate it automatically for the user when they install your package?

    - -

-As of PLY-3.6, the parsetab.py file should be compatible across all versions of Python including Python 2 and 3. Thus, a table file generated in Python 2 should work fine if it's used on Python 3. Because of this, it should be relatively harmless to distribute the parsetab.py file yourself if you need to. However, be aware that older/newer versions of PLY may try to regenerate the file if there are future enhancements or changes to its format.

    - -

-To make the generation of table files easier for the purposes of installation, you might want to make your parser files executable using the -m option or similar. For example:

    - -
    -
    -# calc.py
    -...
    -...
    -def make_parser():
    -    parser = yacc.yacc()
    -    return parser
    -
    -if __name__ == '__main__':
    -    make_parser()
    -
    -
    - -

-You can then use a command such as python -m calc to generate the tables. Alternatively, a setup.py script can import the module and use make_parser() to create the parsing tables.
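-A sketch of the setup.py approach (module and package names are assumptions):
-
-# setup.py
-from setuptools import setup
-
-import calc                 # the calc.py module shown above
-calc.make_parser()          # generate parsetab.py before packaging
-
-setup(
-    name="calc",
-    version="1.0",
-    py_modules=["calc"],
-)
-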

    - -

-If you're willing to sacrifice a little startup time, you can also instruct PLY to never write the tables using yacc.yacc(write_tables=False, debug=False). In this mode, PLY will regenerate the parsing tables from scratch each time. For a small grammar, you probably won't notice. For a large grammar, you should probably reconsider; the parsing tables are meant to dramatically speed up this process.

    - -

-During operation, it is normal for PLY to produce diagnostic error messages (usually printed to standard error). These are generated entirely using the logging module. If you want to redirect these messages or silence them, you can provide your own logging object to yacc(). For example:

    - -
    -
    -import logging
    -log = logging.getLogger('ply')
    -...
    -parser = yacc.yacc(errorlog=log)
    -
    -
    - -

    11. Where to go from here?

-The examples directory of the PLY distribution contains several simple examples. Please consult a compilers textbook for the theory and underlying implementation details of LR parsing.
diff --git a/xonsh/ply/example/BASIC/README b/xonsh/ply/example/BASIC/README
deleted file mode 100644
index be24a30..0000000
--- a/xonsh/ply/example/BASIC/README
+++ /dev/null
@@ -1,79 +0,0 @@
-Inspired by a September 14, 2006 Salon article "Why Johnny Can't Code" by
-David Brin (http://www.salon.com/tech/feature/2006/09/14/basic/index.html),
-I thought that a fully working BASIC interpreter might be an interesting,
-if not questionable, PLY example. Uh, okay, so maybe it's just a bad idea,
-but in any case, here it is.
-
-In this example, you'll find a rough implementation of 1964 Dartmouth BASIC
-as described in the manual at:
-
-   http://www.bitsavers.org/pdf/dartmouth/BASIC_Oct64.pdf
-
-See also:
-
-   http://en.wikipedia.org/wiki/Dartmouth_BASIC
-
-This dialect is downright primitive---there are no string variables
-and no facilities for interactive input. Moreover, subroutines and functions
-are brain-dead even more than they usually are for BASIC. Of course,
-the GOTO statement is provided.
-
-Nevertheless, there are a few interesting aspects of this example:
-
-  - It illustrates a fully working interpreter including lexing, parsing,
-    and interpretation of instructions.
-
-  - The parser shows how to catch and report various kinds of parsing
-    errors in a more graceful way.
-
-  - The example both parses files (supplied on command line) and
-    interactive input entered line by line.
-
-  - It shows how you might represent parsed information. In this case,
-    each BASIC statement is encoded into a Python tuple containing the
-    statement type and parameters. These tuples are then stored in
-    a dictionary indexed by program line numbers.
-
-  - Even though it's just BASIC, the parser contains more than 80
-    rules and 150 parsing states. Thus, it's a little more meaty than
-    the calculator example.
-
-To use the example, run it as follows:
-
-   % python basic.py hello.bas
-   HELLO WORLD
-   %
-
-or use it interactively:
-
-   % python basic.py
-   [BASIC] 10 PRINT "HELLO WORLD"
-   [BASIC] 20 END
-   [BASIC] RUN
-   HELLO WORLD
-   [BASIC]
-
-The following files are defined:
-
-   basic.py      - High level script that controls everything
-   basiclex.py   - BASIC tokenizer
-   basparse.py   - BASIC parser
-   basinterp.py  - BASIC interpreter that runs parsed programs.
-
-In addition, a number of sample BASIC programs (.bas suffix) are
-provided. These were taken out of the Dartmouth manual.
-
-Disclaimer: I haven't spent a ton of time testing this and it's likely that
-I've skimped here and there on a few finer details (e.g., strictly enforcing
-variable naming rules). However, the interpreter seems to be able to run
-the examples in the BASIC manual.
-
-Have fun!
-
--Dave
-
-
diff --git a/xonsh/ply/example/BASIC/basic.py b/xonsh/ply/example/BASIC/basic.py
deleted file mode 100644
index 17687b1..0000000
--- a/xonsh/ply/example/BASIC/basic.py
+++ /dev/null
@@ -1,66 +0,0 @@
-# An implementation of Dartmouth BASIC (1964)
-#
-
-import sys
-sys.path.insert(0, "../..")
-
-if sys.version_info[0] >= 3:
-    raw_input = input
-
-import basiclex
-import basparse
-import basinterp
-
-# If a filename has been specified, we try to run it.
-# If a runtime error occurs, we bail out and enter
-# interactive mode below
-if len(sys.argv) == 2:
-    with open(sys.argv[1]) as f:
-        data = f.read()
-    prog = basparse.parse(data)
-    if not prog:
-        raise SystemExit
-    b = basinterp.BasicInterpreter(prog)
-    try:
-        b.run()
-        raise SystemExit
-    except RuntimeError:
-        pass
-
-else:
-    b = basinterp.BasicInterpreter({})
-
-# Interactive mode. This incrementally adds/deletes statements
-# from the program stored in the BasicInterpreter object. In
-# addition, special commands 'NEW','LIST',and 'RUN' are added.
-# Specifying a line number with no code deletes that line from
-# the program.
-
-while 1:
-    try:
-        line = raw_input("[BASIC] ")
-    except EOFError:
-        raise SystemExit
-    if not line:
-        continue
-    line += "\n"
-    prog = basparse.parse(line)
-    if not prog:
-        continue
-
-    keys = list(prog)
-    if keys[0] > 0:
-        b.add_statements(prog)
-    else:
-        stat = prog[keys[0]]
-        if stat[0] == 'RUN':
-            try:
-                b.run()
-            except RuntimeError:
-                pass
-        elif stat[0] == 'LIST':
-            b.list()
-        elif stat[0] == 'BLANK':
-            b.del_line(stat[1])
-        elif stat[0] == 'NEW':
-            b.new()
diff --git a/xonsh/ply/example/BASIC/basiclex.py b/xonsh/ply/example/BASIC/basiclex.py
deleted file mode 100644
index 4151f4c..0000000
--- a/xonsh/ply/example/BASIC/basiclex.py
+++ /dev/null
@@ -1,61 +0,0 @@
-# An implementation of Dartmouth BASIC (1964)
-
-from ply import *
-
-keywords = (
-    'LET', 'READ', 'DATA', 'PRINT', 'GOTO', 'IF', 'THEN', 'FOR', 'NEXT', 'TO', 'STEP',
-    'END', 'STOP', 'DEF', 'GOSUB', 'DIM', 'REM', 'RETURN', 'RUN', 'LIST', 'NEW',
-)
-
-tokens = keywords + (
-    'EQUALS', 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'POWER',
-    'LPAREN', 'RPAREN', 'LT', 'LE', 'GT', 'GE', 'NE',
-    'COMMA', 'SEMI', 'INTEGER', 'FLOAT', 'STRING',
-    'ID', 'NEWLINE'
-)
-
-t_ignore = ' \t'
-
-
-def t_REM(t):
-    r'REM .*'
-    return t
-
-
-def t_ID(t):
-    r'[A-Z][A-Z0-9]*'
-    if t.value in keywords:
-        t.type = t.value
-    return t
-
-t_EQUALS = r'='
-t_PLUS = r'\+'
-t_MINUS = r'-'
-t_TIMES = r'\*'
-t_POWER = r'\^'
-t_DIVIDE = r'/'
-t_LPAREN = r'\('
-t_RPAREN = r'\)'
-t_LT = r'<'
-t_LE = r'<='
-t_GT = r'>'
-t_GE = r'>='
-t_NE = r'<>'
-t_COMMA = r'\,'
-t_SEMI = r';'
-t_INTEGER = r'\d+'
-t_FLOAT = r'((\d*\.\d+)(E[\+-]?\d+)?|([1-9]\d*E[\+-]?\d+))'
-t_STRING = r'\".*?\"'
-
-
-def t_NEWLINE(t):
-    r'\n'
-    t.lexer.lineno += 1
-    return t
-
-
-def t_error(t):
-    print("Illegal character %s" % t.value[0])
-    t.lexer.skip(1)
-
-lex.lex(debug=0)
diff --git a/xonsh/ply/example/BASIC/basiclog.py b/xonsh/ply/example/BASIC/basiclog.py
deleted file mode 100644
index 9258e29..0000000
--- a/xonsh/ply/example/BASIC/basiclog.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# An implementation of Dartmouth BASIC (1964)
-#
-
-import sys
-sys.path.insert(0, "../..")
-
-if sys.version_info[0] >= 3:
-    raw_input = input
-
-import logging
-logging.basicConfig(
-    level=logging.INFO,
-    filename="parselog.txt",
-    filemode="w"
-)
-log = logging.getLogger()
-
-import basiclex
-import basparse
-import basinterp
-
-# If a filename has been specified, we try to run it.
-# If a runtime error occurs, we bail out and enter
-# interactive mode below
-if len(sys.argv) == 2:
-    with open(sys.argv[1]) as f:
-        data = f.read()
-    prog = basparse.parse(data, debug=log)
-    if not prog:
-        raise SystemExit
-    b = basinterp.BasicInterpreter(prog)
-    try:
-        b.run()
-        raise SystemExit
-    except RuntimeError:
-        pass
-
-else:
-    b = basinterp.BasicInterpreter({})
-
-# Interactive mode. This incrementally adds/deletes statements
-# from the program stored in the BasicInterpreter object. In
-# addition, special commands 'NEW','LIST',and 'RUN' are added.
-# Specifying a line number with no code deletes that line from
-# the program.
-
-while 1:
-    try:
-        line = raw_input("[BASIC] ")
-    except EOFError:
-        raise SystemExit
-    if not line:
-        continue
-    line += "\n"
-    prog = basparse.parse(line, debug=log)
-    if not prog:
-        continue
-
-    keys = list(prog)
-    if keys[0] > 0:
-        b.add_statements(prog)
-    else:
-        stat = prog[keys[0]]
-        if stat[0] == 'RUN':
-            try:
-                b.run()
-            except RuntimeError:
-                pass
-        elif stat[0] == 'LIST':
-            b.list()
-        elif stat[0] == 'BLANK':
-            b.del_line(stat[1])
-        elif stat[0] == 'NEW':
-            b.new()
diff --git a/xonsh/ply/example/BASIC/basinterp.py b/xonsh/ply/example/BASIC/basinterp.py
deleted file mode 100644
index 67762c7..0000000
--- a/xonsh/ply/example/BASIC/basinterp.py
+++ /dev/null
@@ -1,496 +0,0 @@
-# This file provides the runtime support for running a basic program
-# Assumes the program has been parsed using basparse.py
-
-import sys
-import math
-import random
-
-
-class BasicInterpreter:
-
-    # Initialize the interpreter. prog is a dictionary
-    # containing (line,statement) mappings
-    def __init__(self, prog):
-        self.prog = prog
-
-        self.functions = {           # Built-in function table
-            'SIN': lambda z: math.sin(self.eval(z)),
-            'COS': lambda z: math.cos(self.eval(z)),
-            'TAN': lambda z: math.tan(self.eval(z)),
-            'ATN': lambda z: math.atan(self.eval(z)),
-            'EXP': lambda z: math.exp(self.eval(z)),
-            'ABS': lambda z: abs(self.eval(z)),
-            'LOG': lambda z: math.log(self.eval(z)),
-            'SQR': lambda z: math.sqrt(self.eval(z)),
-            'INT': lambda z: int(self.eval(z)),
-            'RND': lambda z: random.random()
-        }
-
-    # Collect all data statements
-    def collect_data(self):
-        self.data = []
-        for lineno in self.stat:
-            if self.prog[lineno][0] == 'DATA':
-                self.data = self.data + self.prog[lineno][1]
-        self.dc = 0                  # Initialize the data counter
-
-    # Check for end statements
-    def check_end(self):
-        has_end = 0
-        for lineno in self.stat:
-            if self.prog[lineno][0] == 'END' and not has_end:
-                has_end = lineno
-        if not has_end:
-            print("NO END INSTRUCTION")
-            self.error = 1
-            return
-        if has_end != lineno:
-            print("END IS NOT LAST")
-            self.error = 1
-
-    # Check loops
-    def check_loops(self):
-        for pc in range(len(self.stat)):
-            lineno = self.stat[pc]
-            if self.prog[lineno][0] == 'FOR':
-                forinst = self.prog[lineno]
-                loopvar = forinst[1]
-                for i in range(pc + 1, len(self.stat)):
-                    if self.prog[self.stat[i]][0] == 'NEXT':
-                        nextvar = self.prog[self.stat[i]][1]
-                        if nextvar != loopvar:
-                            continue
-                        self.loopend[pc] = i
-                        break
-                else:
-                    print("FOR WITHOUT NEXT AT LINE %s" % self.stat[pc])
-                    self.error = 1
-
-    # Evaluate an expression
-    def eval(self, expr):
-        etype = expr[0]
-        if etype == 'NUM':
-            return expr[1]
-        elif etype == 'GROUP':
-            return self.eval(expr[1])
-        elif etype == 'UNARY':
-            if expr[1] == '-':
-                return -self.eval(expr[2])
-        elif etype == 'BINOP':
-            if expr[1] == '+':
-                return self.eval(expr[2]) + self.eval(expr[3])
-            elif expr[1] == '-':
-                return self.eval(expr[2]) - self.eval(expr[3])
-            elif expr[1] == '*':
-                return self.eval(expr[2]) * self.eval(expr[3])
-            elif expr[1] == '/':
-                return float(self.eval(expr[2])) / self.eval(expr[3])
-            elif expr[1] == '^':
-                return abs(self.eval(expr[2]))**self.eval(expr[3])
-        elif etype == 'VAR':
-            var, dim1, dim2 = expr[1]
-            if not dim1 and not dim2:
-                if var in self.vars:
-                    return self.vars[var]
-                else:
-                    print("UNDEFINED VARIABLE %s AT LINE %s" %
-                          (var, self.stat[self.pc]))
-                    raise RuntimeError
-            # May be a list lookup or a function evaluation
-            if dim1 and not dim2:
-                if var in self.functions:
-                    # A function
-                    return self.functions[var](dim1)
-                else:
-                    # A list evaluation
-                    if var in self.lists:
-                        dim1val = self.eval(dim1)
-                        if dim1val < 1 or dim1val > len(self.lists[var]):
-                            print("LIST INDEX OUT OF BOUNDS AT LINE %s" %
-                                  self.stat[self.pc])
-                            raise RuntimeError
-                        return self.lists[var][dim1val - 1]
-            if dim1 and dim2:
-                if var in self.tables:
-                    dim1val = self.eval(dim1)
-                    dim2val = self.eval(dim2)
-                    if dim1val < 1 or dim1val > len(self.tables[var]) or dim2val < 1 or dim2val > len(self.tables[var][0]):
-                        print("TABLE INDEX OUT OUT BOUNDS AT LINE %s" %
-                              self.stat[self.pc])
-                        raise RuntimeError
-                    return self.tables[var][dim1val - 1][dim2val - 1]
-            print("UNDEFINED VARIABLE %s AT LINE %s" %
-                  (var, self.stat[self.pc]))
-            raise RuntimeError
-
-    # Evaluate a relational expression
-    def releval(self, expr):
-        etype = expr[1]
-        lhs = self.eval(expr[2])
-        rhs = self.eval(expr[3])
-        if etype == '<':
-            if lhs < rhs:
-                return 1
-            else:
-                return 0
-
-        elif etype == '<=':
-            if lhs <= rhs:
-                return 1
-            else:
-                return 0
-
-        elif etype == '>':
-            if lhs > rhs:
-                return 1
-            else:
-                return 0
-
-        elif etype == '>=':
-            if lhs >= rhs:
-                return 1
-            else:
-                return 0
-
-        elif etype == '=':
-            if lhs == rhs:
-                return 1
-            else:
-                return 0
-
-        elif etype == '<>':
-            if lhs != rhs:
-                return 1
-            else:
-                return 0
-
-    # Assignment
-    def assign(self, target, value):
-        var, dim1, dim2 = target
-        if not dim1 and not dim2:
-            self.vars[var] = self.eval(value)
-        elif dim1 and not dim2:
-            # List assignment
-            dim1val = self.eval(dim1)
-            if not var in self.lists:
-                self.lists[var] = [0] * 10
-
-            if dim1val > len(self.lists[var]):
-                print ("DIMENSION TOO LARGE AT LINE %s" % self.stat[self.pc])
-                raise RuntimeError
-            self.lists[var][dim1val - 1] = self.eval(value)
-        elif dim1 and dim2:
-            dim1val = self.eval(dim1)
-            dim2val = self.eval(dim2)
-            if not var in self.tables:
-                temp = [0] * 10
-                v = []
-                for i in range(10):
-                    v.append(temp[:])
-                self.tables[var] = v
-            # Variable already exists
-            if dim1val > len(self.tables[var]) or dim2val > len(self.tables[var][0]):
-                print("DIMENSION TOO LARGE AT LINE %s" % self.stat[self.pc])
-                raise RuntimeError
-            self.tables[var][dim1val - 1][dim2val - 1] = self.eval(value)
-
-    # Change the current line number
-    def goto(self, linenum):
-        if not linenum in self.prog:
-            print("UNDEFINED LINE NUMBER %d AT LINE %d" %
-                  (linenum, self.stat[self.pc]))
-            raise RuntimeError
-        self.pc = self.stat.index(linenum)
-
-    # Run it
-    def run(self):
-        self.vars = {}            # All variables
-        self.lists = {}           # List variables
-        self.tables = {}          # Tables
-        self.loops = []           # Currently active loops
-        self.loopend = {}         # Mapping saying where loops end
-        self.gosub = None         # Gosub return point (if any)
-        self.error = 0            # Indicates program error
-
-        self.stat = list(self.prog)  # Ordered list of all line numbers
-        self.stat.sort()
-        self.pc = 0                  # Current program counter
-
-        # Processing prior to running
-
-        self.collect_data()          # Collect all of the data statements
-        self.check_end()
-        self.check_loops()
-
-        if self.error:
-            raise RuntimeError
-
-        while 1:
-            line = self.stat[self.pc]
-            instr = self.prog[line]
-
-            op = instr[0]
-
-            # END and STOP statements
-            if op == 'END' or op == 'STOP':
-                break           # We're done
-
-            # GOTO statement
-            elif op == 'GOTO':
-                newline = instr[1]
-                self.goto(newline)
-                continue
-
-            # PRINT statement
-            elif op == 'PRINT':
-                plist = instr[1]
-                out = ""
-                for label, val in plist:
-                    if out:
-                        out += ' ' * (15 - (len(out) % 15))
-                    out += label
-                    if val:
-                        if label:
-                            out += " "
-                        eval = self.eval(val)
-                        out += str(eval)
-                sys.stdout.write(out)
-                end = instr[2]
-                if not (end == ',' or end == ';'):
-                    sys.stdout.write("\n")
-                if end == ',':
-                    sys.stdout.write(" " * (15 - (len(out) % 15)))
-                if end == ';':
-                    sys.stdout.write(" " * (3 - (len(out) % 3)))
-
-            # LET statement
-            elif op == 'LET':
-                target = instr[1]
-                value = instr[2]
-                self.assign(target, value)
-
-            # READ statement
-            elif op == 'READ':
-                for target in instr[1]:
-                    if self.dc < len(self.data):
-                        value = ('NUM', self.data[self.dc])
-                        self.assign(target, value)
-                        self.dc += 1
-                    else:
-                        # No more data. Program ends
-                        return
-            elif op == 'IF':
-                relop = instr[1]
-                newline = instr[2]
-                if (self.releval(relop)):
-                    self.goto(newline)
-                    continue
-
-            elif op == 'FOR':
-                loopvar = instr[1]
-                initval = instr[2]
-                finval = instr[3]
-                stepval = instr[4]
-
-                # Check to see if this is a new loop
-                if not self.loops or self.loops[-1][0] != self.pc:
-                    # Looks like a new loop. Make the initial assignment
-                    newvalue = initval
-                    self.assign((loopvar, None, None), initval)
-                    if not stepval:
-                        stepval = ('NUM', 1)
-                    stepval = self.eval(stepval)    # Evaluate step here
-                    self.loops.append((self.pc, stepval))
-                else:
-                    # It's a repeat of the previous loop
-                    # Update the value of the loop variable according to the
-                    # step
-                    stepval = ('NUM', self.loops[-1][1])
-                    newvalue = (
-                        'BINOP', '+', ('VAR', (loopvar, None, None)), stepval)
-
-                if self.loops[-1][1] < 0:
-                    relop = '>='
-                else:
-                    relop = '<='
-                if not self.releval(('RELOP', relop, newvalue, finval)):
-                    # Loop is done. Jump to the NEXT
-                    self.pc = self.loopend[self.pc]
-                    self.loops.pop()
-                else:
-                    self.assign((loopvar, None, None), newvalue)
-
-            elif op == 'NEXT':
-                if not self.loops:
-                    print("NEXT WITHOUT FOR AT LINE %s" % line)
-                    return
-
-                nextvar = instr[1]
-                self.pc = self.loops[-1][0]
-                loopinst = self.prog[self.stat[self.pc]]
-                forvar = loopinst[1]
-                if nextvar != forvar:
-                    print("NEXT DOESN'T MATCH FOR AT LINE %s" % line)
-                    return
-                continue
-            elif op == 'GOSUB':
-                newline = instr[1]
-                if self.gosub:
-                    print("ALREADY IN A SUBROUTINE AT LINE %s" % line)
-                    return
-                self.gosub = self.stat[self.pc]
-                self.goto(newline)
-                continue
-
-            elif op == 'RETURN':
-                if not self.gosub:
-                    print("RETURN WITHOUT A GOSUB AT LINE %s" % line)
-                    return
-                self.goto(self.gosub)
-                self.gosub = None
-
-            elif op == 'FUNC':
-                fname = instr[1]
-                pname = instr[2]
-                expr = instr[3]
-
-                def eval_func(pvalue, name=pname, self=self, expr=expr):
-                    self.assign((pname, None, None), pvalue)
-                    return self.eval(expr)
-                self.functions[fname] = eval_func
-
-            elif op == 'DIM':
-                for vname, x, y in instr[1]:
-                    if y == 0:
-                        # Single dimension variable
-                        self.lists[vname] = [0] * x
-                    else:
-                        # Double dimension variable
-                        temp = [0] * y
-                        v = []
-                        for i in range(x):
-                            v.append(temp[:])
-                        self.tables[vname] = v
-
-            self.pc += 1
-
-    # Utility functions for program listing
-    def expr_str(self, expr):
-        etype = expr[0]
-        if etype == 'NUM':
-            return str(expr[1])
-        elif etype == 'GROUP':
-            return "(%s)" % self.expr_str(expr[1])
-        elif etype == 'UNARY':
-            if expr[1] == '-':
-                return "-" + str(expr[2])
-        elif etype == 'BINOP':
-            return "%s %s %s" % (self.expr_str(expr[2]), expr[1], self.expr_str(expr[3]))
-        elif etype == 'VAR':
-            return self.var_str(expr[1])
-
-    def relexpr_str(self, expr):
-        return "%s %s %s" % (self.expr_str(expr[2]), expr[1], self.expr_str(expr[3]))
-
-    def var_str(self, var):
-        varname, dim1, dim2 = var
-        if not dim1 and not dim2:
-            return varname
-        if dim1 and
not dim2: - return "%s(%s)" % (varname, self.expr_str(dim1)) - return "%s(%s,%s)" % (varname, self.expr_str(dim1), self.expr_str(dim2)) - - # Create a program listing - def list(self): - stat = list(self.prog) # Ordered list of all line numbers - stat.sort() - for line in stat: - instr = self.prog[line] - op = instr[0] - if op in ['END', 'STOP', 'RETURN']: - print("%s %s" % (line, op)) - continue - elif op == 'REM': - print("%s %s" % (line, instr[1])) - elif op == 'PRINT': - _out = "%s %s " % (line, op) - first = 1 - for p in instr[1]: - if not first: - _out += ", " - if p[0] and p[1]: - _out += '"%s"%s' % (p[0], self.expr_str(p[1])) - elif p[1]: - _out += self.expr_str(p[1]) - else: - _out += '"%s"' % (p[0],) - first = 0 - if instr[2]: - _out += instr[2] - print(_out) - elif op == 'LET': - print("%s LET %s = %s" % - (line, self.var_str(instr[1]), self.expr_str(instr[2]))) - elif op == 'READ': - _out = "%s READ " % line - first = 1 - for r in instr[1]: - if not first: - _out += "," - _out += self.var_str(r) - first = 0 - print(_out) - elif op == 'IF': - print("%s IF %s THEN %d" % - (line, self.relexpr_str(instr[1]), instr[2])) - elif op == 'GOTO' or op == 'GOSUB': - print("%s %s %s" % (line, op, instr[1])) - elif op == 'FOR': - _out = "%s FOR %s = %s TO %s" % ( - line, instr[1], self.expr_str(instr[2]), self.expr_str(instr[3])) - if instr[4]: - _out += " STEP %s" % (self.expr_str(instr[4])) - print(_out) - elif op == 'NEXT': - print("%s NEXT %s" % (line, instr[1])) - elif op == 'FUNC': - print("%s DEF %s(%s) = %s" % - (line, instr[1], instr[2], self.expr_str(instr[3]))) - elif op == 'DIM': - _out = "%s DIM " % line - first = 1 - for vname, x, y in instr[1]: - if not first: - _out += "," - first = 0 - if y == 0: - _out += "%s(%d)" % (vname, x) - else: - _out += "%s(%d,%d)" % (vname, x, y) - - print(_out) - elif op == 'DATA': - _out = "%s DATA " % line - first = 1 - for v in instr[1]: - if not first: - _out += "," - first = 0 - _out += v - print(_out) - - # Erase the current program - def new(self): - self.prog = {} - - # Insert statements - def add_statements(self, prog): - for line, stat in prog.items(): - self.prog[line] = stat - - # Delete a statement - def del_line(self, lineno): - try: - del self.prog[lineno] - except KeyError: - pass diff --git a/xonsh/ply/example/BASIC/basparse.py b/xonsh/ply/example/BASIC/basparse.py deleted file mode 100644 index d610c7d..0000000 --- a/xonsh/ply/example/BASIC/basparse.py +++ /dev/null @@ -1,474 +0,0 @@ -# An implementation of Dartmouth BASIC (1964) -# - -from ply import * -import basiclex - -tokens = basiclex.tokens - -precedence = ( - ('left', 'PLUS', 'MINUS'), - ('left', 'TIMES', 'DIVIDE'), - ('left', 'POWER'), - ('right', 'UMINUS') -) - -# A BASIC program is a series of statements. We represent the program as a -# dictionary of tuples indexed by line number. - - -def p_program(p): - '''program : program statement - | statement''' - - if len(p) == 2 and p[1]: - p[0] = {} - line, stat = p[1] - p[0][line] = stat - elif len(p) == 3: - p[0] = p[1] - if not p[0]: - p[0] = {} - if p[2]: - line, stat = p[2] - p[0][line] = stat - -# This catch-all rule is used for any catastrophic errors. In this case, -# we simply return nothing - - -def p_program_error(p): - '''program : error''' - p[0] = None - p.parser.error = 1 - -# Format of all BASIC statements. 
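For orientation, here is an editorial sketch (not part of the reverted files) of the shapes these rules produce: each statement reduces to a (lineno, command-tuple) pair, and p_program above folds those pairs into a dict keyed by line number.

    # Hypothetical round trip, assuming the module above is saved as basparse.py.
    # The command tuples are the ones built by the p_command_* rules below.
    import basparse

    prog = basparse.parse("10 LET X = 3\n20 PRINT X\n99 END\n")
    # prog should look roughly like:
    #   {10: ('LET', ('X', None, None), ('NUM', 3)),
    #    20: ('PRINT', [('', ('VAR', ('X', None, None)))], None),
    #    99: ('END',)}
    for lineno in sorted(prog):
        print(lineno, prog[lineno][0])   # 10 LET / 20 PRINT / 99 END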
- - -def p_statement(p): - '''statement : INTEGER command NEWLINE''' - if isinstance(p[2], str): - print("%s %s %s" % (p[2], "AT LINE", p[1])) - p[0] = None - p.parser.error = 1 - else: - lineno = int(p[1]) - p[0] = (lineno, p[2]) - -# Interactive statements. - - -def p_statement_interactive(p): - '''statement : RUN NEWLINE - | LIST NEWLINE - | NEW NEWLINE''' - p[0] = (0, (p[1], 0)) - -# Blank line number - - -def p_statement_blank(p): - '''statement : INTEGER NEWLINE''' - p[0] = (0, ('BLANK', int(p[1]))) - -# Error handling for malformed statements - - -def p_statement_bad(p): - '''statement : INTEGER error NEWLINE''' - print("MALFORMED STATEMENT AT LINE %s" % p[1]) - p[0] = None - p.parser.error = 1 - -# Blank line - - -def p_statement_newline(p): - '''statement : NEWLINE''' - p[0] = None - -# LET statement - - -def p_command_let(p): - '''command : LET variable EQUALS expr''' - p[0] = ('LET', p[2], p[4]) - - -def p_command_let_bad(p): - '''command : LET variable EQUALS error''' - p[0] = "BAD EXPRESSION IN LET" - -# READ statement - - -def p_command_read(p): - '''command : READ varlist''' - p[0] = ('READ', p[2]) - - -def p_command_read_bad(p): - '''command : READ error''' - p[0] = "MALFORMED VARIABLE LIST IN READ" - -# DATA statement - - -def p_command_data(p): - '''command : DATA numlist''' - p[0] = ('DATA', p[2]) - - -def p_command_data_bad(p): - '''command : DATA error''' - p[0] = "MALFORMED NUMBER LIST IN DATA" - -# PRINT statement - - -def p_command_print(p): - '''command : PRINT plist optend''' - p[0] = ('PRINT', p[2], p[3]) - - -def p_command_print_bad(p): - '''command : PRINT error''' - p[0] = "MALFORMED PRINT STATEMENT" - -# Optional ending on PRINT. Either a comma (,) or semicolon (;) - - -def p_optend(p): - '''optend : COMMA - | SEMI - |''' - if len(p) == 2: - p[0] = p[1] - else: - p[0] = None - -# PRINT statement with no arguments - - -def p_command_print_empty(p): - '''command : PRINT''' - p[0] = ('PRINT', [], None) - -# GOTO statement - - -def p_command_goto(p): - '''command : GOTO INTEGER''' - p[0] = ('GOTO', int(p[2])) - - -def p_command_goto_bad(p): - '''command : GOTO error''' - p[0] = "INVALID LINE NUMBER IN GOTO" - -# IF-THEN statement - - -def p_command_if(p): - '''command : IF relexpr THEN INTEGER''' - p[0] = ('IF', p[2], int(p[4])) - - -def p_command_if_bad(p): - '''command : IF error THEN INTEGER''' - p[0] = "BAD RELATIONAL EXPRESSION" - - -def p_command_if_bad2(p): - '''command : IF relexpr THEN error''' - p[0] = "INVALID LINE NUMBER IN THEN" - -# FOR statement - - -def p_command_for(p): - '''command : FOR ID EQUALS expr TO expr optstep''' - p[0] = ('FOR', p[2], p[4], p[6], p[7]) - - -def p_command_for_bad_initial(p): - '''command : FOR ID EQUALS error TO expr optstep''' - p[0] = "BAD INITIAL VALUE IN FOR STATEMENT" - - -def p_command_for_bad_final(p): - '''command : FOR ID EQUALS expr TO error optstep''' - p[0] = "BAD FINAL VALUE IN FOR STATEMENT" - - -def p_command_for_bad_step(p): - '''command : FOR ID EQUALS expr TO expr STEP error''' - p[0] = "MALFORMED STEP IN FOR STATEMENT" - -# Optional STEP qualifier on FOR statement - - -def p_optstep(p): - '''optstep : STEP expr - | empty''' - if len(p) == 3: - p[0] = p[2] - else: - p[0] = None - -# NEXT statement - - -def p_command_next(p): - '''command : NEXT ID''' - - p[0] = ('NEXT', p[2]) - - -def p_command_next_bad(p): - '''command : NEXT error''' - p[0] = "MALFORMED NEXT" - -# END statement - - -def p_command_end(p): - '''command : END''' - p[0] = ('END',) - -# REM statement - - -def p_command_rem(p): - '''command 
: REM''' - p[0] = ('REM', p[1]) - -# STOP statement - - -def p_command_stop(p): - '''command : STOP''' - p[0] = ('STOP',) - -# DEF statement - - -def p_command_def(p): - '''command : DEF ID LPAREN ID RPAREN EQUALS expr''' - p[0] = ('FUNC', p[2], p[4], p[7]) - - -def p_command_def_bad_rhs(p): - '''command : DEF ID LPAREN ID RPAREN EQUALS error''' - p[0] = "BAD EXPRESSION IN DEF STATEMENT" - - -def p_command_def_bad_arg(p): - '''command : DEF ID LPAREN error RPAREN EQUALS expr''' - p[0] = "BAD ARGUMENT IN DEF STATEMENT" - -# GOSUB statement - - -def p_command_gosub(p): - '''command : GOSUB INTEGER''' - p[0] = ('GOSUB', int(p[2])) - - -def p_command_gosub_bad(p): - '''command : GOSUB error''' - p[0] = "INVALID LINE NUMBER IN GOSUB" - -# RETURN statement - - -def p_command_return(p): - '''command : RETURN''' - p[0] = ('RETURN',) - -# DIM statement - - -def p_command_dim(p): - '''command : DIM dimlist''' - p[0] = ('DIM', p[2]) - - -def p_command_dim_bad(p): - '''command : DIM error''' - p[0] = "MALFORMED VARIABLE LIST IN DIM" - -# List of variables supplied to DIM statement - - -def p_dimlist(p): - '''dimlist : dimlist COMMA dimitem - | dimitem''' - if len(p) == 4: - p[0] = p[1] - p[0].append(p[3]) - else: - p[0] = [p[1]] - -# DIM items - - -def p_dimitem_single(p): - '''dimitem : ID LPAREN INTEGER RPAREN''' - p[0] = (p[1], eval(p[3]), 0) - - -def p_dimitem_double(p): - '''dimitem : ID LPAREN INTEGER COMMA INTEGER RPAREN''' - p[0] = (p[1], eval(p[3]), eval(p[5])) - -# Arithmetic expressions - - -def p_expr_binary(p): - '''expr : expr PLUS expr - | expr MINUS expr - | expr TIMES expr - | expr DIVIDE expr - | expr POWER expr''' - - p[0] = ('BINOP', p[2], p[1], p[3]) - - -def p_expr_number(p): - '''expr : INTEGER - | FLOAT''' - p[0] = ('NUM', eval(p[1])) - - -def p_expr_variable(p): - '''expr : variable''' - p[0] = ('VAR', p[1]) - - -def p_expr_group(p): - '''expr : LPAREN expr RPAREN''' - p[0] = ('GROUP', p[2]) - - -def p_expr_unary(p): - '''expr : MINUS expr %prec UMINUS''' - p[0] = ('UNARY', '-', p[2]) - -# Relational expressions - - -def p_relexpr(p): - '''relexpr : expr LT expr - | expr LE expr - | expr GT expr - | expr GE expr - | expr EQUALS expr - | expr NE expr''' - p[0] = ('RELOP', p[2], p[1], p[3]) - -# Variables - - -def p_variable(p): - '''variable : ID - | ID LPAREN expr RPAREN - | ID LPAREN expr COMMA expr RPAREN''' - if len(p) == 2: - p[0] = (p[1], None, None) - elif len(p) == 5: - p[0] = (p[1], p[3], None) - else: - p[0] = (p[1], p[3], p[5]) - -# Builds a list of variable targets as a Python list - - -def p_varlist(p): - '''varlist : varlist COMMA variable - | variable''' - if len(p) > 2: - p[0] = p[1] - p[0].append(p[3]) - else: - p[0] = [p[1]] - - -# Builds a list of numbers as a Python list - -def p_numlist(p): - '''numlist : numlist COMMA number - | number''' - - if len(p) > 2: - p[0] = p[1] - p[0].append(p[3]) - else: - p[0] = [p[1]] - -# A number. May be an integer or a float - - -def p_number(p): - '''number : INTEGER - | FLOAT''' - p[0] = eval(p[1]) - -# A signed number. 
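The number rules here (p_number above and the signed variant just below) lean on eval() to turn matched token text into an int or a float. That is safe in this context because the lexer only feeds them text matching the INTEGER or FLOAT patterns, though explicit conversion is the more defensive modern habit. A self-contained sketch of the difference (editorial, not from the patch):

    # eval() happily converts lexer-validated numeric text...
    assert eval("42") == 42 and eval("3.5E2") == 350.0
    # ...but an explicit dispatch does the same without executing arbitrary code:
    def to_number(text):
        return float(text) if any(c in text for c in ".eE") else int(text)
    assert to_number("42") == 42 and to_number("3.5E2") == 350.0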
- - -def p_number_signed(p): - '''number : MINUS INTEGER - | MINUS FLOAT''' - p[0] = eval("-" + p[2]) - -# List of targets for a print statement -# Returns a list of tuples (label,expr) - - -def p_plist(p): - '''plist : plist COMMA pitem - | pitem''' - if len(p) > 3: - p[0] = p[1] - p[0].append(p[3]) - else: - p[0] = [p[1]] - - -def p_item_string(p): - '''pitem : STRING''' - p[0] = (p[1][1:-1], None) - - -def p_item_string_expr(p): - '''pitem : STRING expr''' - p[0] = (p[1][1:-1], p[2]) - - -def p_item_expr(p): - '''pitem : expr''' - p[0] = ("", p[1]) - -# Empty - - -def p_empty(p): - '''empty : ''' - -# Catastrophic error handler - - -def p_error(p): - if not p: - print("SYNTAX ERROR AT EOF") - -bparser = yacc.yacc() - - -def parse(data, debug=0): - bparser.error = 0 - p = bparser.parse(data, debug=debug) - if bparser.error: - return None - return p diff --git a/xonsh/ply/example/BASIC/dim.bas b/xonsh/ply/example/BASIC/dim.bas deleted file mode 100644 index 87bd95b..0000000 --- a/xonsh/ply/example/BASIC/dim.bas +++ /dev/null @@ -1,14 +0,0 @@ -5 DIM A(50,15) -10 FOR I = 1 TO 50 -20 FOR J = 1 TO 15 -30 LET A(I,J) = I + J -35 REM PRINT I,J, A(I,J) -40 NEXT J -50 NEXT I -100 FOR I = 1 TO 50 -110 FOR J = 1 TO 15 -120 PRINT A(I,J), -130 NEXT J -140 PRINT -150 NEXT I -999 END diff --git a/xonsh/ply/example/BASIC/func.bas b/xonsh/ply/example/BASIC/func.bas deleted file mode 100644 index 447ee16..0000000 --- a/xonsh/ply/example/BASIC/func.bas +++ /dev/null @@ -1,5 +0,0 @@ -10 DEF FDX(X) = 2*X -20 FOR I = 0 TO 100 -30 PRINT FDX(I) -40 NEXT I -50 END diff --git a/xonsh/ply/example/BASIC/gcd.bas b/xonsh/ply/example/BASIC/gcd.bas deleted file mode 100644 index d0b7746..0000000 --- a/xonsh/ply/example/BASIC/gcd.bas +++ /dev/null @@ -1,22 +0,0 @@ -10 PRINT "A","B","C","GCD" -20 READ A,B,C -30 LET X = A -40 LET Y = B -50 GOSUB 200 -60 LET X = G -70 LET Y = C -80 GOSUB 200 -90 PRINT A, B, C, G -100 GOTO 20 -110 DATA 60, 90, 120 -120 DATA 38456, 64872, 98765 -130 DATA 32, 384, 72 -200 LET Q = INT(X/Y) -210 LET R = X - Q*Y -220 IF R = 0 THEN 300 -230 LET X = Y -240 LET Y = R -250 GOTO 200 -300 LET G = Y -310 RETURN -999 END diff --git a/xonsh/ply/example/BASIC/gosub.bas b/xonsh/ply/example/BASIC/gosub.bas deleted file mode 100644 index 99737b1..0000000 --- a/xonsh/ply/example/BASIC/gosub.bas +++ /dev/null @@ -1,13 +0,0 @@ -100 LET X = 3 -110 GOSUB 400 -120 PRINT U, V, W -200 LET X = 5 -210 GOSUB 400 -220 LET Z = U + 2*V + 3*W -230 PRINT Z -240 GOTO 999 -400 LET U = X*X -410 LET V = X*X*X -420 LET W = X*X*X*X + X*X*X + X*X + X -430 RETURN -999 END diff --git a/xonsh/ply/example/BASIC/hello.bas b/xonsh/ply/example/BASIC/hello.bas deleted file mode 100644 index cc6f0b0..0000000 --- a/xonsh/ply/example/BASIC/hello.bas +++ /dev/null @@ -1,4 +0,0 @@ -5 REM HELLO WORLD PROGAM -10 PRINT "HELLO WORLD" -99 END - diff --git a/xonsh/ply/example/BASIC/linear.bas b/xonsh/ply/example/BASIC/linear.bas deleted file mode 100644 index 56c0822..0000000 --- a/xonsh/ply/example/BASIC/linear.bas +++ /dev/null @@ -1,17 +0,0 @@ -1 REM ::: SOLVE A SYSTEM OF LINEAR EQUATIONS -2 REM ::: A1*X1 + A2*X2 = B1 -3 REM ::: A3*X1 + A4*X2 = B2 -4 REM -------------------------------------- -10 READ A1, A2, A3, A4 -15 LET D = A1 * A4 - A3 * A2 -20 IF D = 0 THEN 65 -30 READ B1, B2 -37 LET X1 = (B1*A4 - B2*A2) / D -42 LET X2 = (A1*B2 - A3*B1) / D -55 PRINT X1, X2 -60 GOTO 30 -65 PRINT "NO UNIQUE SOLUTION" -70 DATA 1, 2, 4 -80 DATA 2, -7, 5 -85 DATA 1, 3, 4, -7 -90 END diff --git a/xonsh/ply/example/BASIC/maxsin.bas 
b/xonsh/ply/example/BASIC/maxsin.bas deleted file mode 100644 index b969015..0000000 --- a/xonsh/ply/example/BASIC/maxsin.bas +++ /dev/null @@ -1,12 +0,0 @@ -5 PRINT "X VALUE", "SINE", "RESOLUTION" -10 READ D -20 LET M = -1 -30 FOR X = 0 TO 3 STEP D -40 IF SIN(X) <= M THEN 80 -50 LET X0 = X -60 LET M = SIN(X) -80 NEXT X -85 PRINT X0, M, D -90 GOTO 10 -100 DATA .1, .01, .001 -110 END diff --git a/xonsh/ply/example/BASIC/powers.bas b/xonsh/ply/example/BASIC/powers.bas deleted file mode 100644 index a454dc3..0000000 --- a/xonsh/ply/example/BASIC/powers.bas +++ /dev/null @@ -1,13 +0,0 @@ -5 PRINT "THIS PROGRAM COMPUTES AND PRINTS THE NTH POWERS" -6 PRINT "OF THE NUMBERS LESS THAN OR EQUAL TO N FOR VARIOUS" -7 PRINT "N FROM 1 THROUGH 7" -8 PRINT -10 FOR N = 1 TO 7 -15 PRINT "N = "N -20 FOR I = 1 TO N -30 PRINT I^N, -40 NEXT I -50 PRINT -60 PRINT -70 NEXT N -80 END diff --git a/xonsh/ply/example/BASIC/rand.bas b/xonsh/ply/example/BASIC/rand.bas deleted file mode 100644 index 4ff7a14..0000000 --- a/xonsh/ply/example/BASIC/rand.bas +++ /dev/null @@ -1,4 +0,0 @@ -10 FOR I = 1 TO 20 -20 PRINT INT(10*RND(0)) -30 NEXT I -40 END diff --git a/xonsh/ply/example/BASIC/sales.bas b/xonsh/ply/example/BASIC/sales.bas deleted file mode 100644 index a39aefb..0000000 --- a/xonsh/ply/example/BASIC/sales.bas +++ /dev/null @@ -1,20 +0,0 @@ -10 FOR I = 1 TO 3 -20 READ P(I) -30 NEXT I -40 FOR I = 1 TO 3 -50 FOR J = 1 TO 5 -60 READ S(I,J) -70 NEXT J -80 NEXT I -90 FOR J = 1 TO 5 -100 LET S = 0 -110 FOR I = 1 TO 3 -120 LET S = S + P(I) * S(I,J) -130 NEXT I -140 PRINT "TOTAL SALES FOR SALESMAN"J, "$"S -150 NEXT J -200 DATA 1.25, 4.30, 2.50 -210 DATA 40, 20, 37, 29, 42 -220 DATA 10, 16, 3, 21, 8 -230 DATA 35, 47, 29, 16, 33 -300 END diff --git a/xonsh/ply/example/BASIC/sears.bas b/xonsh/ply/example/BASIC/sears.bas deleted file mode 100644 index 5ced397..0000000 --- a/xonsh/ply/example/BASIC/sears.bas +++ /dev/null @@ -1,18 +0,0 @@ -1 REM :: THIS PROGRAM COMPUTES HOW MANY TIMES YOU HAVE TO FOLD -2 REM :: A PIECE OF PAPER SO THAT IT IS TALLER THAN THE -3 REM :: SEARS TOWER. 
-4 REM :: S = HEIGHT OF TOWER (METERS) -5 REM :: T = THICKNESS OF PAPER (MILLIMETERS) -10 LET S = 442 -20 LET T = 0.1 -30 REM CONVERT T TO METERS -40 LET T = T * .001 -50 LET F = 1 -60 LET H = T -100 IF H > S THEN 200 -120 LET H = 2 * H -125 LET F = F + 1 -130 GOTO 100 -200 PRINT "NUMBER OF FOLDS ="F -220 PRINT "FINAL HEIGHT ="H -999 END diff --git a/xonsh/ply/example/BASIC/sqrt1.bas b/xonsh/ply/example/BASIC/sqrt1.bas deleted file mode 100644 index 6673a91..0000000 --- a/xonsh/ply/example/BASIC/sqrt1.bas +++ /dev/null @@ -1,5 +0,0 @@ -10 LET X = 0 -20 LET X = X + 1 -30 PRINT X, SQR(X) -40 IF X < 100 THEN 20 -50 END diff --git a/xonsh/ply/example/BASIC/sqrt2.bas b/xonsh/ply/example/BASIC/sqrt2.bas deleted file mode 100644 index 862d85e..0000000 --- a/xonsh/ply/example/BASIC/sqrt2.bas +++ /dev/null @@ -1,4 +0,0 @@ -10 FOR X = 1 TO 100 -20 PRINT X, SQR(X) -30 NEXT X -40 END diff --git a/xonsh/ply/example/GardenSnake/GardenSnake.py b/xonsh/ply/example/GardenSnake/GardenSnake.py deleted file mode 100644 index 8b493b4..0000000 --- a/xonsh/ply/example/GardenSnake/GardenSnake.py +++ /dev/null @@ -1,777 +0,0 @@ -# GardenSnake - a parser generator demonstration program -# -# This implements a modified version of a subset of Python: -# - only 'def', 'return' and 'if' statements -# - 'if' only has 'then' clause (no elif nor else) -# - single-quoted strings only, content in raw format -# - numbers are decimal.Decimal instances (not integers or floats) -# - no print statment; use the built-in 'print' function -# - only < > == + - / * implemented (and unary + -) -# - assignment and tuple assignment work -# - no generators of any sort -# - no ... well, no quite a lot - -# Why? I'm thinking about a new indentation-based configuration -# language for a project and wanted to figure out how to do it. Once -# I got that working I needed a way to test it out. My original AST -# was dumb so I decided to target Python's AST and compile it into -# Python code. Plus, it's pretty cool that it only took a day or so -# from sitting down with Ply to having working code. - -# This uses David Beazley's Ply from http://www.dabeaz.com/ply/ - -# This work is hereby released into the Public Domain. To view a copy of -# the public domain dedication, visit -# http://creativecommons.org/licenses/publicdomain/ or send a letter to -# Creative Commons, 543 Howard Street, 5th Floor, San Francisco, -# California, 94105, USA. -# -# Portions of this work are derived from Python's Grammar definition -# and may be covered under the Python copyright and license -# -# Andrew Dalke / Dalke Scientific Software, LLC -# 30 August 2006 / Cape Town, South Africa - -# Changelog: -# 30 August - added link to CC license; removed the "swapcase" encoding - -# Modifications for inclusion in PLY distribution -import sys -sys.path.insert(0, "../..") -from ply import * - -##### Lexer ###### -#import lex -import decimal - -tokens = ( - 'DEF', - 'IF', - 'NAME', - 'NUMBER', # Python decimals - 'STRING', # single quoted strings only; syntax of raw strings - 'LPAR', - 'RPAR', - 'COLON', - 'EQ', - 'ASSIGN', - 'LT', - 'GT', - 'PLUS', - 'MINUS', - 'MULT', - 'DIV', - 'RETURN', - 'WS', - 'NEWLINE', - 'COMMA', - 'SEMICOLON', - 'INDENT', - 'DEDENT', - 'ENDMARKER', -) - -#t_NUMBER = r'\d+' -# taken from decmial.py but without the leading sign - - -def t_NUMBER(t): - r"""(\d+(\.\d*)?|\.\d+)([eE][-+]? \d+)?""" - t.value = decimal.Decimal(t.value) - return t - - -def t_STRING(t): - r"'([^\\']+|\\'|\\\\)*'" # I think this is right ... 
- t.value = t.value[1:-1].decode("string-escape") # .swapcase() # for fun - return t - -t_COLON = r':' -t_EQ = r'==' -t_ASSIGN = r'=' -t_LT = r'<' -t_GT = r'>' -t_PLUS = r'\+' -t_MINUS = r'-' -t_MULT = r'\*' -t_DIV = r'/' -t_COMMA = r',' -t_SEMICOLON = r';' - -# Ply nicely documented how to do this. - -RESERVED = { - "def": "DEF", - "if": "IF", - "return": "RETURN", -} - - -def t_NAME(t): - r'[a-zA-Z_][a-zA-Z0-9_]*' - t.type = RESERVED.get(t.value, "NAME") - return t - -# Putting this before t_WS let it consume lines with only comments in -# them so the latter code never sees the WS part. Not consuming the -# newline. Needed for "if 1: #comment" - - -def t_comment(t): - r"[ ]*\043[^\n]*" # \043 is '#' - pass - - -# Whitespace -def t_WS(t): - r' [ ]+ ' - if t.lexer.at_line_start and t.lexer.paren_count == 0: - return t - -# Don't generate newline tokens when inside of parenthesis, eg -# a = (1, -# 2, 3) - - -def t_newline(t): - r'\n+' - t.lexer.lineno += len(t.value) - t.type = "NEWLINE" - if t.lexer.paren_count == 0: - return t - - -def t_LPAR(t): - r'\(' - t.lexer.paren_count += 1 - return t - - -def t_RPAR(t): - r'\)' - # check for underflow? should be the job of the parser - t.lexer.paren_count -= 1 - return t - - -def t_error(t): - raise SyntaxError("Unknown symbol %r" % (t.value[0],)) - print "Skipping", repr(t.value[0]) - t.lexer.skip(1) - -# I implemented INDENT / DEDENT generation as a post-processing filter - -# The original lex token stream contains WS and NEWLINE characters. -# WS will only occur before any other tokens on a line. - -# I have three filters. One tags tokens by adding two attributes. -# "must_indent" is True if the token must be indented from the -# previous code. The other is "at_line_start" which is True for WS -# and the first non-WS/non-NEWLINE on a line. It flags the check so -# see if the new line has changed indication level. - -# Python's syntax has three INDENT states -# 0) no colon hence no need to indent -# 1) "if 1: go()" - simple statements have a COLON but no need for an indent -# 2) "if 1:\n go()" - complex statements have a COLON NEWLINE and must indent -NO_INDENT = 0 -MAY_INDENT = 1 -MUST_INDENT = 2 - -# only care about whitespace at the start of a line - - -def track_tokens_filter(lexer, tokens): - lexer.at_line_start = at_line_start = True - indent = NO_INDENT - saw_colon = False - for token in tokens: - token.at_line_start = at_line_start - - if token.type == "COLON": - at_line_start = False - indent = MAY_INDENT - token.must_indent = False - - elif token.type == "NEWLINE": - at_line_start = True - if indent == MAY_INDENT: - indent = MUST_INDENT - token.must_indent = False - - elif token.type == "WS": - assert token.at_line_start == True - at_line_start = True - token.must_indent = False - - else: - # A real token; only indent after COLON NEWLINE - if indent == MUST_INDENT: - token.must_indent = True - else: - token.must_indent = False - at_line_start = False - indent = NO_INDENT - - yield token - lexer.at_line_start = at_line_start - - -def _new_token(type, lineno): - tok = lex.LexToken() - tok.type = type - tok.value = None - tok.lineno = lineno - return tok - -# Synthesize a DEDENT tag - - -def DEDENT(lineno): - return _new_token("DEDENT", lineno) - -# Synthesize an INDENT tag - - -def INDENT(lineno): - return _new_token("INDENT", lineno) - - -# Track the indentation level and emit the right INDENT / DEDENT events. 
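To make the filter below concrete, here is a worked miniature of the level-stack idea (an editorial sketch, not from the patch): each line's leading-whitespace depth is compared against a stack of open indentation levels; going deeper pushes a level and emits INDENT, and coming back emits one DEDENT per level popped.

    # Standalone sketch of the stack discipline indentation_filter applies:
    levels = [0]
    def on_line(depth):
        if depth > levels[-1]:            # deeper than before: open a block
            levels.append(depth)
            return ["INDENT"]
        out = []
        while depth < levels[-1]:         # shallower: close blocks one by one
            levels.pop()
            out.append("DEDENT")
        if depth != levels[-1]:           # must land on a previously seen level
            raise IndentationError("inconsistent indentation")
        return out

    print(on_line(4), on_line(4), on_line(0))   # ['INDENT'] [] ['DEDENT']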
-def indentation_filter(tokens): - # A stack of indentation levels; will never pop item 0 - levels = [0] - token = None - depth = 0 - prev_was_ws = False - for token in tokens: - # if 1: - # print "Process", token, - # if token.at_line_start: - # print "at_line_start", - # if token.must_indent: - # print "must_indent", - # print - - # WS only occurs at the start of the line - # There may be WS followed by NEWLINE so - # only track the depth here. Don't indent/dedent - # until there's something real. - if token.type == "WS": - assert depth == 0 - depth = len(token.value) - prev_was_ws = True - # WS tokens are never passed to the parser - continue - - if token.type == "NEWLINE": - depth = 0 - if prev_was_ws or token.at_line_start: - # ignore blank lines - continue - # pass the other cases on through - yield token - continue - - # then it must be a real token (not WS, not NEWLINE) - # which can affect the indentation level - - prev_was_ws = False - if token.must_indent: - # The current depth must be larger than the previous level - if not (depth > levels[-1]): - raise IndentationError("expected an indented block") - - levels.append(depth) - yield INDENT(token.lineno) - - elif token.at_line_start: - # Must be on the same level or one of the previous levels - if depth == levels[-1]: - # At the same level - pass - elif depth > levels[-1]: - raise IndentationError( - "indentation increase but not in new block") - else: - # Back up; but only if it matches a previous level - try: - i = levels.index(depth) - except ValueError: - raise IndentationError("inconsistent indentation") - for _ in range(i + 1, len(levels)): - yield DEDENT(token.lineno) - levels.pop() - - yield token - - ### Finished processing ### - - # Must dedent any remaining levels - if len(levels) > 1: - assert token is not None - for _ in range(1, len(levels)): - yield DEDENT(token.lineno) - - -# The top-level filter adds an ENDMARKER, if requested. -# Python's grammar uses it. 
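One idiom inside the filter below deserves a note: iter(lexer.token, None) is two-argument iter(), which calls lexer.token repeatedly until it returns the sentinel None, the way PLY lexers signal end of input. The idiom in isolation:

    # Two-argument iter(callable, sentinel) stops at the first sentinel value.
    values = iter([1, 2, 3, None, 4])       # stand-in for successive lexer.token() results
    stream = iter(lambda: next(values), None)
    print(list(stream))                     # [1, 2, 3]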
-def filter(lexer, add_endmarker=True): - token = None - tokens = iter(lexer.token, None) - tokens = track_tokens_filter(lexer, tokens) - for token in indentation_filter(tokens): - yield token - - if add_endmarker: - lineno = 1 - if token is not None: - lineno = token.lineno - yield _new_token("ENDMARKER", lineno) - -# Combine Ply and my filters into a new lexer - - -class IndentLexer(object): - - def __init__(self, debug=0, optimize=0, lextab='lextab', reflags=0): - self.lexer = lex.lex(debug=debug, optimize=optimize, - lextab=lextab, reflags=reflags) - self.token_stream = None - - def input(self, s, add_endmarker=True): - self.lexer.paren_count = 0 - self.lexer.input(s) - self.token_stream = filter(self.lexer, add_endmarker) - - def token(self): - try: - return self.token_stream.next() - except StopIteration: - return None - -########## Parser (tokens -> AST) ###### - -# also part of Ply -#import yacc - -# I use the Python AST -from compiler import ast - -# Helper function - - -def Assign(left, right): - names = [] - if isinstance(left, ast.Name): - # Single assignment on left - return ast.Assign([ast.AssName(left.name, 'OP_ASSIGN')], right) - elif isinstance(left, ast.Tuple): - # List of things - make sure they are Name nodes - names = [] - for child in left.getChildren(): - if not isinstance(child, ast.Name): - raise SyntaxError("that assignment not supported") - names.append(child.name) - ass_list = [ast.AssName(name, 'OP_ASSIGN') for name in names] - return ast.Assign([ast.AssTuple(ass_list)], right) - else: - raise SyntaxError("Can't do that yet") - - -# The grammar comments come from Python's Grammar/Grammar file - -# NB: compound_stmt in single_input is followed by extra NEWLINE! -# file_input: (NEWLINE | stmt)* ENDMARKER -def p_file_input_end(p): - """file_input_end : file_input ENDMARKER""" - p[0] = ast.Stmt(p[1]) - - -def p_file_input(p): - """file_input : file_input NEWLINE - | file_input stmt - | NEWLINE - | stmt""" - if isinstance(p[len(p) - 1], basestring): - if len(p) == 3: - p[0] = p[1] - else: - p[0] = [] # p == 2 --> only a blank line - else: - if len(p) == 3: - p[0] = p[1] + p[2] - else: - p[0] = p[1] - - -# funcdef: [decorators] 'def' NAME parameters ':' suite -# ignoring decorators -def p_funcdef(p): - "funcdef : DEF NAME parameters COLON suite" - p[0] = ast.Function(None, p[2], tuple(p[3]), (), 0, None, p[5]) - -# parameters: '(' [varargslist] ')' - - -def p_parameters(p): - """parameters : LPAR RPAR - | LPAR varargslist RPAR""" - if len(p) == 3: - p[0] = [] - else: - p[0] = p[2] - - -# varargslist: (fpdef ['=' test] ',')* ('*' NAME [',' '**' NAME] | '**' NAME) | -# highly simplified -def p_varargslist(p): - """varargslist : varargslist COMMA NAME - | NAME""" - if len(p) == 4: - p[0] = p[1] + p[3] - else: - p[0] = [p[1]] - -# stmt: simple_stmt | compound_stmt - - -def p_stmt_simple(p): - """stmt : simple_stmt""" - # simple_stmt is a list - p[0] = p[1] - - -def p_stmt_compound(p): - """stmt : compound_stmt""" - p[0] = [p[1]] - -# simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE - - -def p_simple_stmt(p): - """simple_stmt : small_stmts NEWLINE - | small_stmts SEMICOLON NEWLINE""" - p[0] = p[1] - - -def p_small_stmts(p): - """small_stmts : small_stmts SEMICOLON small_stmt - | small_stmt""" - if len(p) == 4: - p[0] = p[1] + [p[3]] - else: - p[0] = [p[1]] - -# small_stmt: expr_stmt | print_stmt | del_stmt | pass_stmt | flow_stmt | -# import_stmt | global_stmt | exec_stmt | assert_stmt - - -def p_small_stmt(p): - """small_stmt : flow_stmt - | expr_stmt""" - p[0] = 
p[1] - -# expr_stmt: testlist (augassign (yield_expr|testlist) | -# ('=' (yield_expr|testlist))*) -# augassign: ('+=' | '-=' | '*=' | '/=' | '%=' | '&=' | '|=' | '^=' | -# '<<=' | '>>=' | '**=' | '//=') - - -def p_expr_stmt(p): - """expr_stmt : testlist ASSIGN testlist - | testlist """ - if len(p) == 2: - # a list of expressions - p[0] = ast.Discard(p[1]) - else: - p[0] = Assign(p[1], p[3]) - - -def p_flow_stmt(p): - "flow_stmt : return_stmt" - p[0] = p[1] - -# return_stmt: 'return' [testlist] - - -def p_return_stmt(p): - "return_stmt : RETURN testlist" - p[0] = ast.Return(p[2]) - - -def p_compound_stmt(p): - """compound_stmt : if_stmt - | funcdef""" - p[0] = p[1] - - -def p_if_stmt(p): - 'if_stmt : IF test COLON suite' - p[0] = ast.If([(p[2], p[4])], None) - - -def p_suite(p): - """suite : simple_stmt - | NEWLINE INDENT stmts DEDENT""" - if len(p) == 2: - p[0] = ast.Stmt(p[1]) - else: - p[0] = ast.Stmt(p[3]) - - -def p_stmts(p): - """stmts : stmts stmt - | stmt""" - if len(p) == 3: - p[0] = p[1] + p[2] - else: - p[0] = p[1] - -# No using Python's approach because Ply supports precedence - -# comparison: expr (comp_op expr)* -# arith_expr: term (('+'|'-') term)* -# term: factor (('*'|'/'|'%'|'//') factor)* -# factor: ('+'|'-'|'~') factor | power -# comp_op: '<'|'>'|'=='|'>='|'<='|'<>'|'!='|'in'|'not' 'in'|'is'|'is' 'not' - - -def make_lt_compare((left, right)): - return ast.Compare(left, [('<', right), ]) - - -def make_gt_compare((left, right)): - return ast.Compare(left, [('>', right), ]) - - -def make_eq_compare((left, right)): - return ast.Compare(left, [('==', right), ]) - - -binary_ops = { - "+": ast.Add, - "-": ast.Sub, - "*": ast.Mul, - "/": ast.Div, - "<": make_lt_compare, - ">": make_gt_compare, - "==": make_eq_compare, -} -unary_ops = { - "+": ast.UnaryAdd, - "-": ast.UnarySub, -} -precedence = ( - ("left", "EQ", "GT", "LT"), - ("left", "PLUS", "MINUS"), - ("left", "MULT", "DIV"), -) - - -def p_comparison(p): - """comparison : comparison PLUS comparison - | comparison MINUS comparison - | comparison MULT comparison - | comparison DIV comparison - | comparison LT comparison - | comparison EQ comparison - | comparison GT comparison - | PLUS comparison - | MINUS comparison - | power""" - if len(p) == 4: - p[0] = binary_ops[p[2]]((p[1], p[3])) - elif len(p) == 3: - p[0] = unary_ops[p[1]](p[2]) - else: - p[0] = p[1] - -# power: atom trailer* ['**' factor] -# trailers enables function calls. I only allow one level of calls -# so this is 'trailer' - - -def p_power(p): - """power : atom - | atom trailer""" - if len(p) == 2: - p[0] = p[1] - else: - if p[2][0] == "CALL": - p[0] = ast.CallFunc(p[1], p[2][1], None, None) - else: - raise AssertionError("not implemented") - - -def p_atom_name(p): - """atom : NAME""" - p[0] = ast.Name(p[1]) - - -def p_atom_number(p): - """atom : NUMBER - | STRING""" - p[0] = ast.Const(p[1]) - - -def p_atom_tuple(p): - """atom : LPAR testlist RPAR""" - p[0] = p[2] - -# trailer: '(' [arglist] ')' | '[' subscriptlist ']' | '.' NAME - - -def p_trailer(p): - "trailer : LPAR arglist RPAR" - p[0] = ("CALL", p[2]) - -# testlist: test (',' test)* [','] -# Contains shift/reduce error - - -def p_testlist(p): - """testlist : testlist_multi COMMA - | testlist_multi """ - if len(p) == 2: - p[0] = p[1] - else: - # May need to promote singleton to tuple - if isinstance(p[1], list): - p[0] = p[1] - else: - p[0] = [p[1]] - # Convert into a tuple? 
- if isinstance(p[0], list): - p[0] = ast.Tuple(p[0]) - - -def p_testlist_multi(p): - """testlist_multi : testlist_multi COMMA test - | test""" - if len(p) == 2: - # singleton - p[0] = p[1] - else: - if isinstance(p[1], list): - p[0] = p[1] + [p[3]] - else: - # singleton -> tuple - p[0] = [p[1], p[3]] - - -# test: or_test ['if' or_test 'else' test] | lambdef -# as I don't support 'and', 'or', and 'not' this works down to 'comparison' -def p_test(p): - "test : comparison" - p[0] = p[1] - - -# arglist: (argument ',')* (argument [',']| '*' test [',' '**' test] | '**' test) -# XXX INCOMPLETE: this doesn't allow the trailing comma -def p_arglist(p): - """arglist : arglist COMMA argument - | argument""" - if len(p) == 4: - p[0] = p[1] + [p[3]] - else: - p[0] = [p[1]] - -# argument: test [gen_for] | test '=' test # Really [keyword '='] test - - -def p_argument(p): - "argument : test" - p[0] = p[1] - - -def p_error(p): - # print "Error!", repr(p) - raise SyntaxError(p) - - -class GardenSnakeParser(object): - - def __init__(self, lexer=None): - if lexer is None: - lexer = IndentLexer() - self.lexer = lexer - self.parser = yacc.yacc(start="file_input_end") - - def parse(self, code): - self.lexer.input(code) - result = self.parser.parse(lexer=self.lexer) - return ast.Module(None, result) - - -###### Code generation ###### - -from compiler import misc, syntax, pycodegen - - -class GardenSnakeCompiler(object): - - def __init__(self): - self.parser = GardenSnakeParser() - - def compile(self, code, filename=""): - tree = self.parser.parse(code) - # print tree - misc.set_filename(filename, tree) - syntax.check(tree) - gen = pycodegen.ModuleCodeGenerator(tree) - code = gen.getCode() - return code - -####### Test code ####### - -compile = GardenSnakeCompiler().compile - -code = r""" - -print('LET\'S TRY THIS \\OUT') - -#Comment here -def x(a): - print('called with',a) - if a == 1: - return 2 - if a*2 > 10: return 999 / 4 - # Another comment here - - return a+2*3 - -ints = (1, 2, - 3, 4, -5) -print('mutiline-expression', ints) - -t = 4+1/3*2+6*(9-5+1) -print('predence test; should be 34+2/3:', t, t==(34+2/3)) - -print('numbers', 1,2,3,4,5) -if 1: - 8 - a=9 - print(x(a)) - -print(x(1)) -print(x(2)) -print(x(8),'3') -print('this is decimal', 1/5) -print('BIG DECIMAL', 1.234567891234567e12345) - -""" - -# Set up the GardenSnake run-time environment - - -def print_(*args): - print "-->", " ".join(map(str, args)) - -globals()["print"] = print_ - -compiled_code = compile(code) - -exec compiled_code in globals() -print "Done" diff --git a/xonsh/ply/example/GardenSnake/README b/xonsh/ply/example/GardenSnake/README deleted file mode 100644 index 4d8be2d..0000000 --- a/xonsh/ply/example/GardenSnake/README +++ /dev/null @@ -1,5 +0,0 @@ -This example is Andrew Dalke's GardenSnake language. It shows how to process an -indentation-like language like Python. Further details can be found here: - -http://dalkescientific.com/writings/diary/archive/2006/08/30/gardensnake_language.html - diff --git a/xonsh/ply/example/README b/xonsh/ply/example/README deleted file mode 100644 index 63519b5..0000000 --- a/xonsh/ply/example/README +++ /dev/null @@ -1,10 +0,0 @@ -Simple examples: - calc - Simple calculator - classcalc - Simple calculate defined as a class - -Complex examples - ansic - ANSI C grammar from K&R - BASIC - A small BASIC interpreter - GardenSnake - A simple python-like language - yply - Converts Unix yacc files to PLY programs. 
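One claim in the GardenSnake test program above is worth verifying (an editorial check, not part of the patch): because GardenSnake numbers are decimal.Decimal instances, the 'precedence test' expression really does come out to 34 and 2/3 rather than a binary-float approximation. The same arithmetic in plain Python:

    from decimal import Decimal as D

    # t = 4 + 1/3*2 + 6*(9-5+1), computed with Decimal as GardenSnake would
    t = D(4) + D(1) / D(3) * D(2) + D(6) * (D(9) - D(5) + D(1))
    print(t)                          # 34.66666666666666666666666667 (28 significant digits)
    print(t == D(34) + D(2) / D(3))   # True at the default 28-digit precision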
- diff --git a/xonsh/ply/example/ansic/README b/xonsh/ply/example/ansic/README deleted file mode 100644 index e049d3b..0000000 --- a/xonsh/ply/example/ansic/README +++ /dev/null @@ -1,2 +0,0 @@ -This example is incomplete. Was going to specify an ANSI C parser. -This is part of it. diff --git a/xonsh/ply/example/ansic/clex.py b/xonsh/ply/example/ansic/clex.py deleted file mode 100644 index 4bde1d7..0000000 --- a/xonsh/ply/example/ansic/clex.py +++ /dev/null @@ -1,168 +0,0 @@ -# ---------------------------------------------------------------------- -# clex.py -# -# A lexer for ANSI C. -# ---------------------------------------------------------------------- - -import sys -sys.path.insert(0, "../..") - -import ply.lex as lex - -# Reserved words -reserved = ( - 'AUTO', 'BREAK', 'CASE', 'CHAR', 'CONST', 'CONTINUE', 'DEFAULT', 'DO', 'DOUBLE', - 'ELSE', 'ENUM', 'EXTERN', 'FLOAT', 'FOR', 'GOTO', 'IF', 'INT', 'LONG', 'REGISTER', - 'RETURN', 'SHORT', 'SIGNED', 'SIZEOF', 'STATIC', 'STRUCT', 'SWITCH', 'TYPEDEF', - 'UNION', 'UNSIGNED', 'VOID', 'VOLATILE', 'WHILE', -) - -tokens = reserved + ( - # Literals (identifier, integer constant, float constant, string constant, - # char const) - 'ID', 'TYPEID', 'ICONST', 'FCONST', 'SCONST', 'CCONST', - - # Operators (+,-,*,/,%,|,&,~,^,<<,>>, ||, &&, !, <, <=, >, >=, ==, !=) - 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MOD', - 'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT', - 'LOR', 'LAND', 'LNOT', - 'LT', 'LE', 'GT', 'GE', 'EQ', 'NE', - - # Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=) - 'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL', - 'LSHIFTEQUAL', 'RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL', - - # Increment/decrement (++,--) - 'PLUSPLUS', 'MINUSMINUS', - - # Structure dereference (->) - 'ARROW', - - # Conditional operator (?) - 'CONDOP', - - # Delimeters ( ) [ ] { } , . ; : - 'LPAREN', 'RPAREN', - 'LBRACKET', 'RBRACKET', - 'LBRACE', 'RBRACE', - 'COMMA', 'PERIOD', 'SEMI', 'COLON', - - # Ellipsis (...) - 'ELLIPSIS', -) - -# Completely ignored characters -t_ignore = ' \t\x0c' - -# Newlines - - -def t_NEWLINE(t): - r'\n+' - t.lexer.lineno += t.value.count("\n") - -# Operators -t_PLUS = r'\+' -t_MINUS = r'-' -t_TIMES = r'\*' -t_DIVIDE = r'/' -t_MOD = r'%' -t_OR = r'\|' -t_AND = r'&' -t_NOT = r'~' -t_XOR = r'\^' -t_LSHIFT = r'<<' -t_RSHIFT = r'>>' -t_LOR = r'\|\|' -t_LAND = r'&&' -t_LNOT = r'!' -t_LT = r'<' -t_GT = r'>' -t_LE = r'<=' -t_GE = r'>=' -t_EQ = r'==' -t_NE = r'!=' - -# Assignment operators - -t_EQUALS = r'=' -t_TIMESEQUAL = r'\*=' -t_DIVEQUAL = r'/=' -t_MODEQUAL = r'%=' -t_PLUSEQUAL = r'\+=' -t_MINUSEQUAL = r'-=' -t_LSHIFTEQUAL = r'<<=' -t_RSHIFTEQUAL = r'>>=' -t_ANDEQUAL = r'&=' -t_OREQUAL = r'\|=' -t_XOREQUAL = r'\^=' - -# Increment/decrement -t_PLUSPLUS = r'\+\+' -t_MINUSMINUS = r'--' - -# -> -t_ARROW = r'->' - -# ? -t_CONDOP = r'\?' - -# Delimeters -t_LPAREN = r'\(' -t_RPAREN = r'\)' -t_LBRACKET = r'\[' -t_RBRACKET = r'\]' -t_LBRACE = r'\{' -t_RBRACE = r'\}' -t_COMMA = r',' -t_PERIOD = r'\.' -t_SEMI = r';' -t_COLON = r':' -t_ELLIPSIS = r'\.\.\.' - -# Identifiers and reserved words - -reserved_map = {} -for r in reserved: - reserved_map[r.lower()] = r - - -def t_ID(t): - r'[A-Za-z_][\w_]*' - t.type = reserved_map.get(t.value, "ID") - return t - -# Integer literal -t_ICONST = r'\d+([uU]|[lL]|[uU][lL]|[lL][uU])?' - -# Floating literal -t_FCONST = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?' 
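A note on ordering in the lexer above (standard PLY behavior): string-defined tokens such as t_PERIOD and t_ELLIPSIS are combined into one master regex sorted by decreasing pattern length, so t_ELLIPSIS (r'\.\.\.') is tried before t_PERIOD (r'\.') regardless of where each appears in the file, while function rules like t_NEWLINE are tried in definition order. A tiny driver to watch it work (hypothetical, assuming the module above is importable as clex):

    import clex   # the ANSI C lexer defined above (assumed to be on sys.path)

    clex.lexer.input("f(a, ...);")
    for tok in iter(clex.lexer.token, None):
        print(tok.type, tok.value)
    # expected: ID LPAREN ID COMMA ELLIPSIS RPAREN SEMI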
- -# String literal -t_SCONST = r'\"([^\\\n]|(\\.))*?\"' - -# Character constant 'c' or L'c' -t_CCONST = r'(L)?\'([^\\\n]|(\\.))*?\'' - -# Comments - - -def t_comment(t): - r'/\*(.|\n)*?\*/' - t.lexer.lineno += t.value.count('\n') - -# Preprocessor directive (ignored) - - -def t_preprocessor(t): - r'\#(.)*?\n' - t.lexer.lineno += 1 - - -def t_error(t): - print("Illegal character %s" % repr(t.value[0])) - t.lexer.skip(1) - -lexer = lex.lex() -if __name__ == "__main__": - lex.runmain(lexer) diff --git a/xonsh/ply/example/ansic/cparse.py b/xonsh/ply/example/ansic/cparse.py deleted file mode 100644 index 5fe9bce..0000000 --- a/xonsh/ply/example/ansic/cparse.py +++ /dev/null @@ -1,1048 +0,0 @@ -# ----------------------------------------------------------------------------- -# cparse.py -# -# Simple parser for ANSI C. Based on the grammar in K&R, 2nd Ed. -# ----------------------------------------------------------------------------- - -import sys -import clex -import ply.yacc as yacc - -# Get the token map -tokens = clex.tokens - -# translation-unit: - - -def p_translation_unit_1(t): - 'translation_unit : external_declaration' - pass - - -def p_translation_unit_2(t): - 'translation_unit : translation_unit external_declaration' - pass - -# external-declaration: - - -def p_external_declaration_1(t): - 'external_declaration : function_definition' - pass - - -def p_external_declaration_2(t): - 'external_declaration : declaration' - pass - -# function-definition: - - -def p_function_definition_1(t): - 'function_definition : declaration_specifiers declarator declaration_list compound_statement' - pass - - -def p_function_definition_2(t): - 'function_definition : declarator declaration_list compound_statement' - pass - - -def p_function_definition_3(t): - 'function_definition : declarator compound_statement' - pass - - -def p_function_definition_4(t): - 'function_definition : declaration_specifiers declarator compound_statement' - pass - -# declaration: - - -def p_declaration_1(t): - 'declaration : declaration_specifiers init_declarator_list SEMI' - pass - - -def p_declaration_2(t): - 'declaration : declaration_specifiers SEMI' - pass - -# declaration-list: - - -def p_declaration_list_1(t): - 'declaration_list : declaration' - pass - - -def p_declaration_list_2(t): - 'declaration_list : declaration_list declaration ' - pass - -# declaration-specifiers - - -def p_declaration_specifiers_1(t): - 'declaration_specifiers : storage_class_specifier declaration_specifiers' - pass - - -def p_declaration_specifiers_2(t): - 'declaration_specifiers : type_specifier declaration_specifiers' - pass - - -def p_declaration_specifiers_3(t): - 'declaration_specifiers : type_qualifier declaration_specifiers' - pass - - -def p_declaration_specifiers_4(t): - 'declaration_specifiers : storage_class_specifier' - pass - - -def p_declaration_specifiers_5(t): - 'declaration_specifiers : type_specifier' - pass - - -def p_declaration_specifiers_6(t): - 'declaration_specifiers : type_qualifier' - pass - -# storage-class-specifier - - -def p_storage_class_specifier(t): - '''storage_class_specifier : AUTO - | REGISTER - | STATIC - | EXTERN - | TYPEDEF - ''' - pass - -# type-specifier: - - -def p_type_specifier(t): - '''type_specifier : VOID - | CHAR - | SHORT - | INT - | LONG - | FLOAT - | DOUBLE - | SIGNED - | UNSIGNED - | struct_or_union_specifier - | enum_specifier - | TYPEID - ''' - pass - -# type-qualifier: - - -def p_type_qualifier(t): - '''type_qualifier : CONST - | VOLATILE''' - pass - -# struct-or-union-specifier - - -def 
p_struct_or_union_specifier_1(t): - 'struct_or_union_specifier : struct_or_union ID LBRACE struct_declaration_list RBRACE' - pass - - -def p_struct_or_union_specifier_2(t): - 'struct_or_union_specifier : struct_or_union LBRACE struct_declaration_list RBRACE' - pass - - -def p_struct_or_union_specifier_3(t): - 'struct_or_union_specifier : struct_or_union ID' - pass - -# struct-or-union: - - -def p_struct_or_union(t): - '''struct_or_union : STRUCT - | UNION - ''' - pass - -# struct-declaration-list: - - -def p_struct_declaration_list_1(t): - 'struct_declaration_list : struct_declaration' - pass - - -def p_struct_declaration_list_2(t): - 'struct_declaration_list : struct_declaration_list struct_declaration' - pass - -# init-declarator-list: - - -def p_init_declarator_list_1(t): - 'init_declarator_list : init_declarator' - pass - - -def p_init_declarator_list_2(t): - 'init_declarator_list : init_declarator_list COMMA init_declarator' - pass - -# init-declarator - - -def p_init_declarator_1(t): - 'init_declarator : declarator' - pass - - -def p_init_declarator_2(t): - 'init_declarator : declarator EQUALS initializer' - pass - -# struct-declaration: - - -def p_struct_declaration(t): - 'struct_declaration : specifier_qualifier_list struct_declarator_list SEMI' - pass - -# specifier-qualifier-list: - - -def p_specifier_qualifier_list_1(t): - 'specifier_qualifier_list : type_specifier specifier_qualifier_list' - pass - - -def p_specifier_qualifier_list_2(t): - 'specifier_qualifier_list : type_specifier' - pass - - -def p_specifier_qualifier_list_3(t): - 'specifier_qualifier_list : type_qualifier specifier_qualifier_list' - pass - - -def p_specifier_qualifier_list_4(t): - 'specifier_qualifier_list : type_qualifier' - pass - -# struct-declarator-list: - - -def p_struct_declarator_list_1(t): - 'struct_declarator_list : struct_declarator' - pass - - -def p_struct_declarator_list_2(t): - 'struct_declarator_list : struct_declarator_list COMMA struct_declarator' - pass - -# struct-declarator: - - -def p_struct_declarator_1(t): - 'struct_declarator : declarator' - pass - - -def p_struct_declarator_2(t): - 'struct_declarator : declarator COLON constant_expression' - pass - - -def p_struct_declarator_3(t): - 'struct_declarator : COLON constant_expression' - pass - -# enum-specifier: - - -def p_enum_specifier_1(t): - 'enum_specifier : ENUM ID LBRACE enumerator_list RBRACE' - pass - - -def p_enum_specifier_2(t): - 'enum_specifier : ENUM LBRACE enumerator_list RBRACE' - pass - - -def p_enum_specifier_3(t): - 'enum_specifier : ENUM ID' - pass - -# enumerator_list: - - -def p_enumerator_list_1(t): - 'enumerator_list : enumerator' - pass - - -def p_enumerator_list_2(t): - 'enumerator_list : enumerator_list COMMA enumerator' - pass - -# enumerator: - - -def p_enumerator_1(t): - 'enumerator : ID' - pass - - -def p_enumerator_2(t): - 'enumerator : ID EQUALS constant_expression' - pass - -# declarator: - - -def p_declarator_1(t): - 'declarator : pointer direct_declarator' - pass - - -def p_declarator_2(t): - 'declarator : direct_declarator' - pass - -# direct-declarator: - - -def p_direct_declarator_1(t): - 'direct_declarator : ID' - pass - - -def p_direct_declarator_2(t): - 'direct_declarator : LPAREN declarator RPAREN' - pass - - -def p_direct_declarator_3(t): - 'direct_declarator : direct_declarator LBRACKET constant_expression_opt RBRACKET' - pass - - -def p_direct_declarator_4(t): - 'direct_declarator : direct_declarator LPAREN parameter_type_list RPAREN ' - pass - - -def p_direct_declarator_5(t): - 
'direct_declarator : direct_declarator LPAREN identifier_list RPAREN ' - pass - - -def p_direct_declarator_6(t): - 'direct_declarator : direct_declarator LPAREN RPAREN ' - pass - -# pointer: - - -def p_pointer_1(t): - 'pointer : TIMES type_qualifier_list' - pass - - -def p_pointer_2(t): - 'pointer : TIMES' - pass - - -def p_pointer_3(t): - 'pointer : TIMES type_qualifier_list pointer' - pass - - -def p_pointer_4(t): - 'pointer : TIMES pointer' - pass - -# type-qualifier-list: - - -def p_type_qualifier_list_1(t): - 'type_qualifier_list : type_qualifier' - pass - - -def p_type_qualifier_list_2(t): - 'type_qualifier_list : type_qualifier_list type_qualifier' - pass - -# parameter-type-list: - - -def p_parameter_type_list_1(t): - 'parameter_type_list : parameter_list' - pass - - -def p_parameter_type_list_2(t): - 'parameter_type_list : parameter_list COMMA ELLIPSIS' - pass - -# parameter-list: - - -def p_parameter_list_1(t): - 'parameter_list : parameter_declaration' - pass - - -def p_parameter_list_2(t): - 'parameter_list : parameter_list COMMA parameter_declaration' - pass - -# parameter-declaration: - - -def p_parameter_declaration_1(t): - 'parameter_declaration : declaration_specifiers declarator' - pass - - -def p_parameter_declaration_2(t): - 'parameter_declaration : declaration_specifiers abstract_declarator_opt' - pass - -# identifier-list: - - -def p_identifier_list_1(t): - 'identifier_list : ID' - pass - - -def p_identifier_list_2(t): - 'identifier_list : identifier_list COMMA ID' - pass - -# initializer: - - -def p_initializer_1(t): - 'initializer : assignment_expression' - pass - - -def p_initializer_2(t): - '''initializer : LBRACE initializer_list RBRACE - | LBRACE initializer_list COMMA RBRACE''' - pass - -# initializer-list: - - -def p_initializer_list_1(t): - 'initializer_list : initializer' - pass - - -def p_initializer_list_2(t): - 'initializer_list : initializer_list COMMA initializer' - pass - -# type-name: - - -def p_type_name(t): - 'type_name : specifier_qualifier_list abstract_declarator_opt' - pass - - -def p_abstract_declarator_opt_1(t): - 'abstract_declarator_opt : empty' - pass - - -def p_abstract_declarator_opt_2(t): - 'abstract_declarator_opt : abstract_declarator' - pass - -# abstract-declarator: - - -def p_abstract_declarator_1(t): - 'abstract_declarator : pointer ' - pass - - -def p_abstract_declarator_2(t): - 'abstract_declarator : pointer direct_abstract_declarator' - pass - - -def p_abstract_declarator_3(t): - 'abstract_declarator : direct_abstract_declarator' - pass - -# direct-abstract-declarator: - - -def p_direct_abstract_declarator_1(t): - 'direct_abstract_declarator : LPAREN abstract_declarator RPAREN' - pass - - -def p_direct_abstract_declarator_2(t): - 'direct_abstract_declarator : direct_abstract_declarator LBRACKET constant_expression_opt RBRACKET' - pass - - -def p_direct_abstract_declarator_3(t): - 'direct_abstract_declarator : LBRACKET constant_expression_opt RBRACKET' - pass - - -def p_direct_abstract_declarator_4(t): - 'direct_abstract_declarator : direct_abstract_declarator LPAREN parameter_type_list_opt RPAREN' - pass - - -def p_direct_abstract_declarator_5(t): - 'direct_abstract_declarator : LPAREN parameter_type_list_opt RPAREN' - pass - -# Optional fields in abstract declarators - - -def p_constant_expression_opt_1(t): - 'constant_expression_opt : empty' - pass - - -def p_constant_expression_opt_2(t): - 'constant_expression_opt : constant_expression' - pass - - -def p_parameter_type_list_opt_1(t): - 'parameter_type_list_opt : empty' - pass 
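The *_opt rules here are the standard yacc idiom for optional grammar elements: pair the real production with an 'empty' production (a rule with no right-hand side, defined as p_empty near the end of this file). Condensed to its essentials (an illustrative sketch, not from the patch):

    # Optional element via an explicit empty production:
    def p_init_opt_1(t):
        'init_opt : empty'
        t[0] = None              # nothing was supplied

    def p_init_opt_2(t):
        'init_opt : EQUALS expression'
        t[0] = t[2]              # the optional initializer's value

    def p_empty(t):
        'empty : '
        pass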
- - -def p_parameter_type_list_opt_2(t): - 'parameter_type_list_opt : parameter_type_list' - pass - -# statement: - - -def p_statement(t): - ''' - statement : labeled_statement - | expression_statement - | compound_statement - | selection_statement - | iteration_statement - | jump_statement - ''' - pass - -# labeled-statement: - - -def p_labeled_statement_1(t): - 'labeled_statement : ID COLON statement' - pass - - -def p_labeled_statement_2(t): - 'labeled_statement : CASE constant_expression COLON statement' - pass - - -def p_labeled_statement_3(t): - 'labeled_statement : DEFAULT COLON statement' - pass - -# expression-statement: - - -def p_expression_statement(t): - 'expression_statement : expression_opt SEMI' - pass - -# compound-statement: - - -def p_compound_statement_1(t): - 'compound_statement : LBRACE declaration_list statement_list RBRACE' - pass - - -def p_compound_statement_2(t): - 'compound_statement : LBRACE statement_list RBRACE' - pass - - -def p_compound_statement_3(t): - 'compound_statement : LBRACE declaration_list RBRACE' - pass - - -def p_compound_statement_4(t): - 'compound_statement : LBRACE RBRACE' - pass - -# statement-list: - - -def p_statement_list_1(t): - 'statement_list : statement' - pass - - -def p_statement_list_2(t): - 'statement_list : statement_list statement' - pass - -# selection-statement - - -def p_selection_statement_1(t): - 'selection_statement : IF LPAREN expression RPAREN statement' - pass - - -def p_selection_statement_2(t): - 'selection_statement : IF LPAREN expression RPAREN statement ELSE statement ' - pass - - -def p_selection_statement_3(t): - 'selection_statement : SWITCH LPAREN expression RPAREN statement ' - pass - -# iteration_statement: - - -def p_iteration_statement_1(t): - 'iteration_statement : WHILE LPAREN expression RPAREN statement' - pass - - -def p_iteration_statement_2(t): - 'iteration_statement : FOR LPAREN expression_opt SEMI expression_opt SEMI expression_opt RPAREN statement ' - pass - - -def p_iteration_statement_3(t): - 'iteration_statement : DO statement WHILE LPAREN expression RPAREN SEMI' - pass - -# jump_statement: - - -def p_jump_statement_1(t): - 'jump_statement : GOTO ID SEMI' - pass - - -def p_jump_statement_2(t): - 'jump_statement : CONTINUE SEMI' - pass - - -def p_jump_statement_3(t): - 'jump_statement : BREAK SEMI' - pass - - -def p_jump_statement_4(t): - 'jump_statement : RETURN expression_opt SEMI' - pass - - -def p_expression_opt_1(t): - 'expression_opt : empty' - pass - - -def p_expression_opt_2(t): - 'expression_opt : expression' - pass - -# expression: - - -def p_expression_1(t): - 'expression : assignment_expression' - pass - - -def p_expression_2(t): - 'expression : expression COMMA assignment_expression' - pass - -# assigment_expression: - - -def p_assignment_expression_1(t): - 'assignment_expression : conditional_expression' - pass - - -def p_assignment_expression_2(t): - 'assignment_expression : unary_expression assignment_operator assignment_expression' - pass - -# assignment_operator: - - -def p_assignment_operator(t): - ''' - assignment_operator : EQUALS - | TIMESEQUAL - | DIVEQUAL - | MODEQUAL - | PLUSEQUAL - | MINUSEQUAL - | LSHIFTEQUAL - | RSHIFTEQUAL - | ANDEQUAL - | OREQUAL - | XOREQUAL - ''' - pass - -# conditional-expression - - -def p_conditional_expression_1(t): - 'conditional_expression : logical_or_expression' - pass - - -def p_conditional_expression_2(t): - 'conditional_expression : logical_or_expression CONDOP expression COLON conditional_expression ' - pass - -# constant-expression - - 
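Before the long expression section that follows, one structural observation (editorial): unlike the calc and BASIC examples, this grammar never declares a precedence table. Operator precedence is encoded in the grammar itself, with conditional_expression above logical_or_expression, which is above logical_and_expression, and so on down to primary_expression, exactly as in the K&R grammar. The two approaches are interchangeable; layered precedence in miniature:

    # Precedence by grammar layering (no precedence table), for + and * only:
    def p_expr_plus(t):
        'expr : expr PLUS term'       # '+' binds loosest, resolved at the top layer
        t[0] = t[1] + t[3]

    def p_expr_term(t):
        'expr : term'
        t[0] = t[1]

    def p_term_times(t):
        'term : term TIMES factor'    # '*' binds tighter, resolved a layer down
        t[0] = t[1] * t[3]

    def p_term_factor(t):
        'term : factor'
        t[0] = t[1]

    def p_factor_num(t):
        'factor : NUMBER'
        t[0] = t[1]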
-def p_constant_expression(t): - 'constant_expression : conditional_expression' - pass - -# logical-or-expression - - -def p_logical_or_expression_1(t): - 'logical_or_expression : logical_and_expression' - pass - - -def p_logical_or_expression_2(t): - 'logical_or_expression : logical_or_expression LOR logical_and_expression' - pass - -# logical-and-expression - - -def p_logical_and_expression_1(t): - 'logical_and_expression : inclusive_or_expression' - pass - - -def p_logical_and_expression_2(t): - 'logical_and_expression : logical_and_expression LAND inclusive_or_expression' - pass - -# inclusive-or-expression: - - -def p_inclusive_or_expression_1(t): - 'inclusive_or_expression : exclusive_or_expression' - pass - - -def p_inclusive_or_expression_2(t): - 'inclusive_or_expression : inclusive_or_expression OR exclusive_or_expression' - pass - -# exclusive-or-expression: - - -def p_exclusive_or_expression_1(t): - 'exclusive_or_expression : and_expression' - pass - - -def p_exclusive_or_expression_2(t): - 'exclusive_or_expression : exclusive_or_expression XOR and_expression' - pass - -# AND-expression - - -def p_and_expression_1(t): - 'and_expression : equality_expression' - pass - - -def p_and_expression_2(t): - 'and_expression : and_expression AND equality_expression' - pass - - -# equality-expression: -def p_equality_expression_1(t): - 'equality_expression : relational_expression' - pass - - -def p_equality_expression_2(t): - 'equality_expression : equality_expression EQ relational_expression' - pass - - -def p_equality_expression_3(t): - 'equality_expression : equality_expression NE relational_expression' - pass - - -# relational-expression: -def p_relational_expression_1(t): - 'relational_expression : shift_expression' - pass - - -def p_relational_expression_2(t): - 'relational_expression : relational_expression LT shift_expression' - pass - - -def p_relational_expression_3(t): - 'relational_expression : relational_expression GT shift_expression' - pass - - -def p_relational_expression_4(t): - 'relational_expression : relational_expression LE shift_expression' - pass - - -def p_relational_expression_5(t): - 'relational_expression : relational_expression GE shift_expression' - pass - -# shift-expression - - -def p_shift_expression_1(t): - 'shift_expression : additive_expression' - pass - - -def p_shift_expression_2(t): - 'shift_expression : shift_expression LSHIFT additive_expression' - pass - - -def p_shift_expression_3(t): - 'shift_expression : shift_expression RSHIFT additive_expression' - pass - -# additive-expression - - -def p_additive_expression_1(t): - 'additive_expression : multiplicative_expression' - pass - - -def p_additive_expression_2(t): - 'additive_expression : additive_expression PLUS multiplicative_expression' - pass - - -def p_additive_expression_3(t): - 'additive_expression : additive_expression MINUS multiplicative_expression' - pass - -# multiplicative-expression - - -def p_multiplicative_expression_1(t): - 'multiplicative_expression : cast_expression' - pass - - -def p_multiplicative_expression_2(t): - 'multiplicative_expression : multiplicative_expression TIMES cast_expression' - pass - - -def p_multiplicative_expression_3(t): - 'multiplicative_expression : multiplicative_expression DIVIDE cast_expression' - pass - - -def p_multiplicative_expression_4(t): - 'multiplicative_expression : multiplicative_expression MOD cast_expression' - pass - -# cast-expression: - - -def p_cast_expression_1(t): - 'cast_expression : unary_expression' - pass - - -def 
p_cast_expression_2(t): - 'cast_expression : LPAREN type_name RPAREN cast_expression' - pass - -# unary-expression: - - -def p_unary_expression_1(t): - 'unary_expression : postfix_expression' - pass - - -def p_unary_expression_2(t): - 'unary_expression : PLUSPLUS unary_expression' - pass - - -def p_unary_expression_3(t): - 'unary_expression : MINUSMINUS unary_expression' - pass - - -def p_unary_expression_4(t): - 'unary_expression : unary_operator cast_expression' - pass - - -def p_unary_expression_5(t): - 'unary_expression : SIZEOF unary_expression' - pass - - -def p_unary_expression_6(t): - 'unary_expression : SIZEOF LPAREN type_name RPAREN' - pass - -# unary-operator - - -def p_unary_operator(t): - '''unary_operator : AND - | TIMES - | PLUS - | MINUS - | NOT - | LNOT ''' - pass - -# postfix-expression: - - -def p_postfix_expression_1(t): - 'postfix_expression : primary_expression' - pass - - -def p_postfix_expression_2(t): - 'postfix_expression : postfix_expression LBRACKET expression RBRACKET' - pass - - -def p_postfix_expression_3(t): - 'postfix_expression : postfix_expression LPAREN argument_expression_list RPAREN' - pass - - -def p_postfix_expression_4(t): - 'postfix_expression : postfix_expression LPAREN RPAREN' - pass - - -def p_postfix_expression_5(t): - 'postfix_expression : postfix_expression PERIOD ID' - pass - - -def p_postfix_expression_6(t): - 'postfix_expression : postfix_expression ARROW ID' - pass - - -def p_postfix_expression_7(t): - 'postfix_expression : postfix_expression PLUSPLUS' - pass - - -def p_postfix_expression_8(t): - 'postfix_expression : postfix_expression MINUSMINUS' - pass - -# primary-expression: - - -def p_primary_expression(t): - '''primary_expression : ID - | constant - | SCONST - | LPAREN expression RPAREN''' - pass - -# argument-expression-list: - - -def p_argument_expression_list(t): - '''argument_expression_list : assignment_expression - | argument_expression_list COMMA assignment_expression''' - pass - -# constant: - - -def p_constant(t): - '''constant : ICONST - | FCONST - | CCONST''' - pass - - -def p_empty(t): - 'empty : ' - pass - - -def p_error(t): - print("Whoa. We're hosed") - -import profile -# Build the grammar - -yacc.yacc() -#yacc.yacc(method='LALR',write_tables=False,debug=False) - -#profile.run("yacc.yacc(method='LALR')") diff --git a/xonsh/ply/example/calc/calc.py b/xonsh/ply/example/calc/calc.py deleted file mode 100644 index 824c3d7..0000000 --- a/xonsh/ply/example/calc/calc.py +++ /dev/null @@ -1,123 +0,0 @@ -# ----------------------------------------------------------------------------- -# calc.py -# -# A simple calculator with variables. This is from O'Reilly's -# "Lex and Yacc", p. 63. 
-# ----------------------------------------------------------------------------- - -import sys -sys.path.insert(0, "../..") - -if sys.version_info[0] >= 3: - raw_input = input - -tokens = ( - 'NAME', 'NUMBER', -) - -literals = ['=', '+', '-', '*', '/', '(', ')'] - -# Tokens - -t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*' - - -def t_NUMBER(t): - r'\d+' - t.value = int(t.value) - return t - -t_ignore = " \t" - - -def t_newline(t): - r'\n+' - t.lexer.lineno += t.value.count("\n") - - -def t_error(t): - print("Illegal character '%s'" % t.value[0]) - t.lexer.skip(1) - -# Build the lexer -import ply.lex as lex -lex.lex() - -# Parsing rules - -precedence = ( - ('left', '+', '-'), - ('left', '*', '/'), - ('right', 'UMINUS'), -) - -# dictionary of names -names = {} - - -def p_statement_assign(p): - 'statement : NAME "=" expression' - names[p[1]] = p[3] - - -def p_statement_expr(p): - 'statement : expression' - print(p[1]) - - -def p_expression_binop(p): - '''expression : expression '+' expression - | expression '-' expression - | expression '*' expression - | expression '/' expression''' - if p[2] == '+': - p[0] = p[1] + p[3] - elif p[2] == '-': - p[0] = p[1] - p[3] - elif p[2] == '*': - p[0] = p[1] * p[3] - elif p[2] == '/': - p[0] = p[1] / p[3] - - -def p_expression_uminus(p): - "expression : '-' expression %prec UMINUS" - p[0] = -p[2] - - -def p_expression_group(p): - "expression : '(' expression ')'" - p[0] = p[2] - - -def p_expression_number(p): - "expression : NUMBER" - p[0] = p[1] - - -def p_expression_name(p): - "expression : NAME" - try: - p[0] = names[p[1]] - except LookupError: - print("Undefined name '%s'" % p[1]) - p[0] = 0 - - -def p_error(p): - if p: - print("Syntax error at '%s'" % p.value) - else: - print("Syntax error at EOF") - -import ply.yacc as yacc -yacc.yacc() - -while 1: - try: - s = raw_input('calc > ') - except EOFError: - break - if not s: - continue - yacc.parse(s) diff --git a/xonsh/ply/example/calcdebug/calc.py b/xonsh/ply/example/calcdebug/calc.py deleted file mode 100644 index 06831e2..0000000 --- a/xonsh/ply/example/calcdebug/calc.py +++ /dev/null @@ -1,129 +0,0 @@ -# ----------------------------------------------------------------------------- -# calc.py -# -# This example shows how to run the parser in a debugging mode -# with output routed to a logging object. 
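# -----------------------------------------------------------------------------
# [Aside.] A hedged mini-sketch of the logging technique just described: both
# table construction (yacc.yacc(debuglog=...)) and each individual parse
# (parser.parse(..., debug=...)) can be routed to a standard logging.Logger.
# The one-token grammar below is hypothetical; only the logging calls matter.

import logging
import ply.lex as lex
import ply.yacc as yacc

tokens = ('NUMBER',)
t_ignore = ' '

def t_NUMBER(t):
    r'\d+'
    t.value = int(t.value)
    return t

def t_error(t):
    t.lexer.skip(1)

def p_statement(p):
    'statement : NUMBER'
    p[0] = p[1]

def p_error(p):
    pass

logging.basicConfig(level=logging.DEBUG, filename='parselog.txt')
log = logging.getLogger()

lex.lex()
parser = yacc.yacc(debug=True, debuglog=log)   # grammar and table diagnostics
parser.parse('42', debug=log)                  # per-parse shift/reduce trace
# -----------------------------------------------------------------------------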
-# ----------------------------------------------------------------------------- - -import sys -sys.path.insert(0, "../..") - -if sys.version_info[0] >= 3: - raw_input = input - -tokens = ( - 'NAME', 'NUMBER', -) - -literals = ['=', '+', '-', '*', '/', '(', ')'] - -# Tokens - -t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*' - - -def t_NUMBER(t): - r'\d+' - t.value = int(t.value) - return t - -t_ignore = " \t" - - -def t_newline(t): - r'\n+' - t.lexer.lineno += t.value.count("\n") - - -def t_error(t): - print("Illegal character '%s'" % t.value[0]) - t.lexer.skip(1) - -# Build the lexer -import ply.lex as lex -lex.lex() - -# Parsing rules - -precedence = ( - ('left', '+', '-'), - ('left', '*', '/'), - ('right', 'UMINUS'), -) - -# dictionary of names -names = {} - - -def p_statement_assign(p): - 'statement : NAME "=" expression' - names[p[1]] = p[3] - - -def p_statement_expr(p): - 'statement : expression' - print(p[1]) - - -def p_expression_binop(p): - '''expression : expression '+' expression - | expression '-' expression - | expression '*' expression - | expression '/' expression''' - if p[2] == '+': - p[0] = p[1] + p[3] - elif p[2] == '-': - p[0] = p[1] - p[3] - elif p[2] == '*': - p[0] = p[1] * p[3] - elif p[2] == '/': - p[0] = p[1] / p[3] - - -def p_expression_uminus(p): - "expression : '-' expression %prec UMINUS" - p[0] = -p[2] - - -def p_expression_group(p): - "expression : '(' expression ')'" - p[0] = p[2] - - -def p_expression_number(p): - "expression : NUMBER" - p[0] = p[1] - - -def p_expression_name(p): - "expression : NAME" - try: - p[0] = names[p[1]] - except LookupError: - print("Undefined name '%s'" % p[1]) - p[0] = 0 - - -def p_error(p): - if p: - print("Syntax error at '%s'" % p.value) - else: - print("Syntax error at EOF") - -import ply.yacc as yacc -yacc.yacc() - -import logging -logging.basicConfig( - level=logging.INFO, - filename="parselog.txt" -) - -while 1: - try: - s = raw_input('calc > ') - except EOFError: - break - if not s: - continue - yacc.parse(s, debug=logging.getLogger()) diff --git a/xonsh/ply/example/calceof/calc.py b/xonsh/ply/example/calceof/calc.py deleted file mode 100644 index 22b39a4..0000000 --- a/xonsh/ply/example/calceof/calc.py +++ /dev/null @@ -1,132 +0,0 @@ -# ----------------------------------------------------------------------------- -# calc.py -# -# A simple calculator with variables. Asks the user for more input and -# demonstrates the use of the t_eof() rule. -# ----------------------------------------------------------------------------- - -import sys -sys.path.insert(0, "../..") - -if sys.version_info[0] >= 3: - raw_input = input - -tokens = ( - 'NAME', 'NUMBER', -) - -literals = ['=', '+', '-', '*', '/', '(', ')'] - -# Tokens - -t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*' - - -def t_NUMBER(t): - r'\d+' - t.value = int(t.value) - return t - -t_ignore = " \t" - - -def t_newline(t): - r'\n+' - t.lexer.lineno += t.value.count("\n") - - -def t_eof(t): - more = raw_input('... 
') - if more: - t.lexer.input(more + '\n') - return t.lexer.token() - else: - return None - - -def t_error(t): - print("Illegal character '%s'" % t.value[0]) - t.lexer.skip(1) - -# Build the lexer -import ply.lex as lex -lex.lex() - -# Parsing rules - -precedence = ( - ('left', '+', '-'), - ('left', '*', '/'), - ('right', 'UMINUS'), -) - -# dictionary of names -names = {} - - -def p_statement_assign(p): - 'statement : NAME "=" expression' - names[p[1]] = p[3] - - -def p_statement_expr(p): - 'statement : expression' - print(p[1]) - - -def p_expression_binop(p): - '''expression : expression '+' expression - | expression '-' expression - | expression '*' expression - | expression '/' expression''' - if p[2] == '+': - p[0] = p[1] + p[3] - elif p[2] == '-': - p[0] = p[1] - p[3] - elif p[2] == '*': - p[0] = p[1] * p[3] - elif p[2] == '/': - p[0] = p[1] / p[3] - - -def p_expression_uminus(p): - "expression : '-' expression %prec UMINUS" - p[0] = -p[2] - - -def p_expression_group(p): - "expression : '(' expression ')'" - p[0] = p[2] - - -def p_expression_number(p): - "expression : NUMBER" - p[0] = p[1] - - -def p_expression_name(p): - "expression : NAME" - try: - p[0] = names[p[1]] - except LookupError: - print("Undefined name '%s'" % p[1]) - p[0] = 0 - - -def p_error(p): - if p: - print("Syntax error at '%s'" % p.value) - else: - print("Syntax error at EOF") - -import ply.yacc as yacc -yacc.yacc() - -while 1: - try: - s = raw_input('calc > ') - except EOFError: - break - if not s: - continue - yacc.parse(s + '\n') diff --git a/xonsh/ply/example/classcalc/calc.py b/xonsh/ply/example/classcalc/calc.py deleted file mode 100755 index ada4afd..0000000 --- a/xonsh/ply/example/classcalc/calc.py +++ /dev/null @@ -1,165 +0,0 @@ -#!/usr/bin/env python - -# ----------------------------------------------------------------------------- -# calc.py -# -# A simple calculator with variables. This is from O'Reilly's -# "Lex and Yacc", p. 63. 
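# -----------------------------------------------------------------------------
# [Aside.] A hedged sketch of the t_eof() idiom from the calceof example above:
# returning a token from t_eof resumes lexing with freshly supplied text, while
# returning None ends the token stream. The _extra list is a hypothetical
# stand-in for interactive raw_input() so the sketch runs unattended.

import ply.lex as lex

tokens = ('WORD',)
t_WORD = r'\S+'
t_ignore = ' \t\n'

_extra = ['spam eggs', '']            # canned "user input" (hypothetical)

def t_eof(t):
    more = _extra.pop(0)              # stand-in for raw_input('... ')
    if more:
        t.lexer.input(more + '\n')    # feed the lexer more data
        return t.lexer.token()        # and hand back its first token
    return None                       # really done: end the stream

def t_error(t):
    t.lexer.skip(1)

lexer = lex.lex()
lexer.input('first line\n')
print([tok.value for tok in lexer])   # -> ['first', 'line', 'spam', 'eggs']
# -----------------------------------------------------------------------------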
-# -# Class-based example contributed to PLY by David McNab -# ----------------------------------------------------------------------------- - -import sys -sys.path.insert(0, "../..") - -if sys.version_info[0] >= 3: - raw_input = input - -import ply.lex as lex -import ply.yacc as yacc -import os - - -class Parser: - """ - Base class for a lexer/parser that has the rules defined as methods - """ - tokens = () - precedence = () - - def __init__(self, **kw): - self.debug = kw.get('debug', 0) - self.names = {} - try: - modname = os.path.split(os.path.splitext(__file__)[0])[ - 1] + "_" + self.__class__.__name__ - except: - modname = "parser" + "_" + self.__class__.__name__ - self.debugfile = modname + ".dbg" - self.tabmodule = modname + "_" + "parsetab" - # print self.debugfile, self.tabmodule - - # Build the lexer and parser - lex.lex(module=self, debug=self.debug) - yacc.yacc(module=self, - debug=self.debug, - debugfile=self.debugfile, - tabmodule=self.tabmodule) - - def run(self): - while 1: - try: - s = raw_input('calc > ') - except EOFError: - break - if not s: - continue - yacc.parse(s) - - -class Calc(Parser): - - tokens = ( - 'NAME', 'NUMBER', - 'PLUS', 'MINUS', 'EXP', 'TIMES', 'DIVIDE', 'EQUALS', - 'LPAREN', 'RPAREN', - ) - - # Tokens - - t_PLUS = r'\+' - t_MINUS = r'-' - t_EXP = r'\*\*' - t_TIMES = r'\*' - t_DIVIDE = r'/' - t_EQUALS = r'=' - t_LPAREN = r'\(' - t_RPAREN = r'\)' - t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*' - - def t_NUMBER(self, t): - r'\d+' - try: - t.value = int(t.value) - except ValueError: - print("Integer value too large %s" % t.value) - t.value = 0 - # print "parsed number %s" % repr(t.value) - return t - - t_ignore = " \t" - - def t_newline(self, t): - r'\n+' - t.lexer.lineno += t.value.count("\n") - - def t_error(self, t): - print("Illegal character '%s'" % t.value[0]) - t.lexer.skip(1) - - # Parsing rules - - precedence = ( - ('left', 'PLUS', 'MINUS'), - ('left', 'TIMES', 'DIVIDE'), - ('left', 'EXP'), - ('right', 'UMINUS'), - ) - - def p_statement_assign(self, p): - 'statement : NAME EQUALS expression' - self.names[p[1]] = p[3] - - def p_statement_expr(self, p): - 'statement : expression' - print(p[1]) - - def p_expression_binop(self, p): - """ - expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression - | expression EXP expression - """ - # print [repr(p[i]) for i in range(0,4)] - if p[2] == '+': - p[0] = p[1] + p[3] - elif p[2] == '-': - p[0] = p[1] - p[3] - elif p[2] == '*': - p[0] = p[1] * p[3] - elif p[2] == '/': - p[0] = p[1] / p[3] - elif p[2] == '**': - p[0] = p[1] ** p[3] - - def p_expression_uminus(self, p): - 'expression : MINUS expression %prec UMINUS' - p[0] = -p[2] - - def p_expression_group(self, p): - 'expression : LPAREN expression RPAREN' - p[0] = p[2] - - def p_expression_number(self, p): - 'expression : NUMBER' - p[0] = p[1] - - def p_expression_name(self, p): - 'expression : NAME' - try: - p[0] = self.names[p[1]] - except LookupError: - print("Undefined name '%s'" % p[1]) - p[0] = 0 - - def p_error(self, p): - if p: - print("Syntax error at '%s'" % p.value) - else: - print("Syntax error at EOF") - -if __name__ == '__main__': - calc = Calc() - calc.run() diff --git a/xonsh/ply/example/cleanup.sh b/xonsh/ply/example/cleanup.sh deleted file mode 100755 index 3e115f4..0000000 --- a/xonsh/ply/example/cleanup.sh +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -rm -f */*.pyc */parsetab.py */parser.out */*~ */*.class diff --git a/xonsh/ply/example/closurecalc/calc.py 
b/xonsh/ply/example/closurecalc/calc.py deleted file mode 100644 index 6031b05..0000000 --- a/xonsh/ply/example/closurecalc/calc.py +++ /dev/null @@ -1,132 +0,0 @@ -# ----------------------------------------------------------------------------- -# calc.py -# -# A calculator parser that makes use of closures. The function make_calculator() -# returns a function that accepts an input string and returns a result. All -# lexing rules, parsing rules, and internal state are held inside the function. -# ----------------------------------------------------------------------------- - -import sys -sys.path.insert(0, "../..") - -if sys.version_info[0] >= 3: - raw_input = input - -# Make a calculator function - - -def make_calculator(): - import ply.lex as lex - import ply.yacc as yacc - - # ------- Internal calculator state - - variables = {} # Dictionary of stored variables - - # ------- Calculator tokenizing rules - - tokens = ( - 'NAME', 'NUMBER', - ) - - literals = ['=', '+', '-', '*', '/', '(', ')'] - - t_ignore = " \t" - - t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*' - - def t_NUMBER(t): - r'\d+' - t.value = int(t.value) - return t - - def t_newline(t): - r'\n+' - t.lexer.lineno += t.value.count("\n") - - def t_error(t): - print("Illegal character '%s'" % t.value[0]) - t.lexer.skip(1) - - # Build the lexer - lexer = lex.lex() - - # ------- Calculator parsing rules - - precedence = ( - ('left', '+', '-'), - ('left', '*', '/'), - ('right', 'UMINUS'), - ) - - def p_statement_assign(p): - 'statement : NAME "=" expression' - variables[p[1]] = p[3] - p[0] = None - - def p_statement_expr(p): - 'statement : expression' - p[0] = p[1] - - def p_expression_binop(p): - '''expression : expression '+' expression - | expression '-' expression - | expression '*' expression - | expression '/' expression''' - if p[2] == '+': - p[0] = p[1] + p[3] - elif p[2] == '-': - p[0] = p[1] - p[3] - elif p[2] == '*': - p[0] = p[1] * p[3] - elif p[2] == '/': - p[0] = p[1] / p[3] - - def p_expression_uminus(p): - "expression : '-' expression %prec UMINUS" - p[0] = -p[2] - - def p_expression_group(p): - "expression : '(' expression ')'" - p[0] = p[2] - - def p_expression_number(p): - "expression : NUMBER" - p[0] = p[1] - - def p_expression_name(p): - "expression : NAME" - try: - p[0] = variables[p[1]] - except LookupError: - print("Undefined name '%s'" % p[1]) - p[0] = 0 - - def p_error(p): - if p: - print("Syntax error at '%s'" % p.value) - else: - print("Syntax error at EOF") - - # Build the parser - parser = yacc.yacc() - - # ------- Input function - - def input(text): - result = parser.parse(text, lexer=lexer) - return result - - return input - -# Make a calculator object and use it -calc = make_calculator() - -while True: - try: - s = raw_input("calc > ") - except EOFError: - break - r = calc(s) - if r: - print(r) diff --git a/xonsh/ply/example/hedit/hedit.py b/xonsh/ply/example/hedit/hedit.py deleted file mode 100644 index 32da745..0000000 --- a/xonsh/ply/example/hedit/hedit.py +++ /dev/null @@ -1,48 +0,0 @@ -# ----------------------------------------------------------------------------- -# hedit.py -# -# Paring of Fortran H Edit descriptions (Contributed by Pearu Peterson) -# -# These tokens can't be easily tokenized because they are of the following -# form: -# -# nHc1...cn -# -# where n is a positive integer and c1 ... cn are characters. 
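# -----------------------------------------------------------------------------
# [Aside.] The hedit.py example below handles such nHc1...cn tokens by
# rewinding the lexer. A condensed, runnable variant of that trick, with int()
# in place of eval() and a concrete run showing the recovered payloads:

import ply.lex as lex

tokens = ('H_EDIT_DESCRIPTOR',)
t_ignore = ' \t\n'

def t_H_EDIT_DESCRIPTOR(t):
    r'\d+H.*'
    i = t.value.index('H')
    n = int(t.value[:i])                          # length prefix before 'H'
    t.lexer.lexpos -= len(t.value) - (i + 1 + n)  # rewind past the extra text
    t.value = t.value[i + 1:i + 1 + n]            # keep exactly n characters
    return t

def t_error(t):
    t.lexer.skip(1)

lexer = lex.lex()
lexer.input('3Habc 2Hxy')
print([tok.value for tok in lexer])   # -> ['abc', 'xy']
# -----------------------------------------------------------------------------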
-# -# This example shows how to modify the state of the lexer to parse -# such tokens -# ----------------------------------------------------------------------------- - -import sys -sys.path.insert(0, "../..") - - -tokens = ( - 'H_EDIT_DESCRIPTOR', -) - -# Tokens -t_ignore = " \t\n" - - -def t_H_EDIT_DESCRIPTOR(t): - r"\d+H.*" # This grabs all of the remaining text - i = t.value.index('H') - n = eval(t.value[:i]) - - # Adjust the tokenizing position - t.lexer.lexpos -= len(t.value) - (i + 1 + n) - - t.value = t.value[i + 1:i + 1 + n] - return t - - -def t_error(t): - print("Illegal character '%s'" % t.value[0]) - t.lexer.skip(1) - -# Build the lexer -import ply.lex as lex -lex.lex() -lex.runmain() diff --git a/xonsh/ply/example/newclasscalc/calc.py b/xonsh/ply/example/newclasscalc/calc.py deleted file mode 100755 index 43c9506..0000000 --- a/xonsh/ply/example/newclasscalc/calc.py +++ /dev/null @@ -1,167 +0,0 @@ -#!/usr/bin/env python - -# ----------------------------------------------------------------------------- -# calc.py -# -# A simple calculator with variables. This is from O'Reilly's -# "Lex and Yacc", p. 63. -# -# Class-based example contributed to PLY by David McNab. -# -# Modified to use new-style classes. Test case. -# ----------------------------------------------------------------------------- - -import sys -sys.path.insert(0, "../..") - -if sys.version_info[0] >= 3: - raw_input = input - -import ply.lex as lex -import ply.yacc as yacc -import os - - -class Parser(object): - """ - Base class for a lexer/parser that has the rules defined as methods - """ - tokens = () - precedence = () - - def __init__(self, **kw): - self.debug = kw.get('debug', 0) - self.names = {} - try: - modname = os.path.split(os.path.splitext(__file__)[0])[ - 1] + "_" + self.__class__.__name__ - except: - modname = "parser" + "_" + self.__class__.__name__ - self.debugfile = modname + ".dbg" - self.tabmodule = modname + "_" + "parsetab" - # print self.debugfile, self.tabmodule - - # Build the lexer and parser - lex.lex(module=self, debug=self.debug) - yacc.yacc(module=self, - debug=self.debug, - debugfile=self.debugfile, - tabmodule=self.tabmodule) - - def run(self): - while 1: - try: - s = raw_input('calc > ') - except EOFError: - break - if not s: - continue - yacc.parse(s) - - -class Calc(Parser): - - tokens = ( - 'NAME', 'NUMBER', - 'PLUS', 'MINUS', 'EXP', 'TIMES', 'DIVIDE', 'EQUALS', - 'LPAREN', 'RPAREN', - ) - - # Tokens - - t_PLUS = r'\+' - t_MINUS = r'-' - t_EXP = r'\*\*' - t_TIMES = r'\*' - t_DIVIDE = r'/' - t_EQUALS = r'=' - t_LPAREN = r'\(' - t_RPAREN = r'\)' - t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*' - - def t_NUMBER(self, t): - r'\d+' - try: - t.value = int(t.value) - except ValueError: - print("Integer value too large %s" % t.value) - t.value = 0 - # print "parsed number %s" % repr(t.value) - return t - - t_ignore = " \t" - - def t_newline(self, t): - r'\n+' - t.lexer.lineno += t.value.count("\n") - - def t_error(self, t): - print("Illegal character '%s'" % t.value[0]) - t.lexer.skip(1) - - # Parsing rules - - precedence = ( - ('left', 'PLUS', 'MINUS'), - ('left', 'TIMES', 'DIVIDE'), - ('left', 'EXP'), - ('right', 'UMINUS'), - ) - - def p_statement_assign(self, p): - 'statement : NAME EQUALS expression' - self.names[p[1]] = p[3] - - def p_statement_expr(self, p): - 'statement : expression' - print(p[1]) - - def p_expression_binop(self, p): - """ - expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression - | expression 
EXP expression - """ - # print [repr(p[i]) for i in range(0,4)] - if p[2] == '+': - p[0] = p[1] + p[3] - elif p[2] == '-': - p[0] = p[1] - p[3] - elif p[2] == '*': - p[0] = p[1] * p[3] - elif p[2] == '/': - p[0] = p[1] / p[3] - elif p[2] == '**': - p[0] = p[1] ** p[3] - - def p_expression_uminus(self, p): - 'expression : MINUS expression %prec UMINUS' - p[0] = -p[2] - - def p_expression_group(self, p): - 'expression : LPAREN expression RPAREN' - p[0] = p[2] - - def p_expression_number(self, p): - 'expression : NUMBER' - p[0] = p[1] - - def p_expression_name(self, p): - 'expression : NAME' - try: - p[0] = self.names[p[1]] - except LookupError: - print("Undefined name '%s'" % p[1]) - p[0] = 0 - - def p_error(self, p): - if p: - print("Syntax error at '%s'" % p.value) - else: - print("Syntax error at EOF") - -if __name__ == '__main__': - calc = Calc() - calc.run() diff --git a/xonsh/ply/example/optcalc/README b/xonsh/ply/example/optcalc/README deleted file mode 100644 index 53dd5fc..0000000 --- a/xonsh/ply/example/optcalc/README +++ /dev/null @@ -1,9 +0,0 @@ -An example showing how to use Python optimized mode. -To run: - - - First run 'python calc.py' - - - Then run 'python -OO calc.py' - -If working correctly, the second version should run the -same way. diff --git a/xonsh/ply/example/optcalc/calc.py b/xonsh/ply/example/optcalc/calc.py deleted file mode 100644 index 0c223e5..0000000 --- a/xonsh/ply/example/optcalc/calc.py +++ /dev/null @@ -1,134 +0,0 @@ -# ----------------------------------------------------------------------------- -# calc.py -# -# A simple calculator with variables. This is from O'Reilly's -# "Lex and Yacc", p. 63. -# ----------------------------------------------------------------------------- - -import sys -sys.path.insert(0, "../..") - -if sys.version_info[0] >= 3: - raw_input = input - -tokens = ( - 'NAME', 'NUMBER', - 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'EQUALS', - 'LPAREN', 'RPAREN', -) - -# Tokens - -t_PLUS = r'\+' -t_MINUS = r'-' -t_TIMES = r'\*' -t_DIVIDE = r'/' -t_EQUALS = r'=' -t_LPAREN = r'\(' -t_RPAREN = r'\)' -t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*' - - -def t_NUMBER(t): - r'\d+' - try: - t.value = int(t.value) - except ValueError: - print("Integer value too large %s" % t.value) - t.value = 0 - return t - -t_ignore = " \t" - - -def t_newline(t): - r'\n+' - t.lexer.lineno += t.value.count("\n") - - -def t_error(t): - print("Illegal character '%s'" % t.value[0]) - t.lexer.skip(1) - -# Build the lexer -import ply.lex as lex -lex.lex(optimize=1) - -# Parsing rules - -precedence = ( - ('left', 'PLUS', 'MINUS'), - ('left', 'TIMES', 'DIVIDE'), - ('right', 'UMINUS'), -) - -# dictionary of names -names = {} - - -def p_statement_assign(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - - -def p_statement_expr(t): - 'statement : expression' - print(t[1]) - - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+': - t[0] = t[1] + t[3] - elif t[2] == '-': - t[0] = t[1] - t[3] - elif t[2] == '*': - t[0] = t[1] * t[3] - elif t[2] == '/': - t[0] = t[1] / t[3] - elif t[2] == '<': - t[0] = t[1] < t[3] - - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - 
except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - - -def p_error(t): - if t: - print("Syntax error at '%s'" % t.value) - else: - print("Syntax error at EOF") - -import ply.yacc as yacc -yacc.yacc(optimize=1) - -while 1: - try: - s = raw_input('calc > ') - except EOFError: - break - yacc.parse(s) diff --git a/xonsh/ply/example/unicalc/calc.py b/xonsh/ply/example/unicalc/calc.py deleted file mode 100644 index 901c4b9..0000000 --- a/xonsh/ply/example/unicalc/calc.py +++ /dev/null @@ -1,133 +0,0 @@ -# ----------------------------------------------------------------------------- -# calc.py -# -# A simple calculator with variables. This is from O'Reilly's -# "Lex and Yacc", p. 63. -# -# This example uses unicode strings for tokens, docstrings, and input. -# ----------------------------------------------------------------------------- - -import sys -sys.path.insert(0, "../..") - -tokens = ( - 'NAME', 'NUMBER', - 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'EQUALS', - 'LPAREN', 'RPAREN', -) - -# Tokens - -t_PLUS = ur'\+' -t_MINUS = ur'-' -t_TIMES = ur'\*' -t_DIVIDE = ur'/' -t_EQUALS = ur'=' -t_LPAREN = ur'\(' -t_RPAREN = ur'\)' -t_NAME = ur'[a-zA-Z_][a-zA-Z0-9_]*' - - -def t_NUMBER(t): - ur'\d+' - try: - t.value = int(t.value) - except ValueError: - print "Integer value too large", t.value - t.value = 0 - return t - -t_ignore = u" \t" - - -def t_newline(t): - ur'\n+' - t.lexer.lineno += t.value.count("\n") - - -def t_error(t): - print "Illegal character '%s'" % t.value[0] - t.lexer.skip(1) - -# Build the lexer -import ply.lex as lex -lex.lex() - -# Parsing rules - -precedence = ( - ('left', 'PLUS', 'MINUS'), - ('left', 'TIMES', 'DIVIDE'), - ('right', 'UMINUS'), -) - -# dictionary of names -names = {} - - -def p_statement_assign(p): - 'statement : NAME EQUALS expression' - names[p[1]] = p[3] - - -def p_statement_expr(p): - 'statement : expression' - print p[1] - - -def p_expression_binop(p): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if p[2] == u'+': - p[0] = p[1] + p[3] - elif p[2] == u'-': - p[0] = p[1] - p[3] - elif p[2] == u'*': - p[0] = p[1] * p[3] - elif p[2] == u'/': - p[0] = p[1] / p[3] - - -def p_expression_uminus(p): - 'expression : MINUS expression %prec UMINUS' - p[0] = -p[2] - - -def p_expression_group(p): - 'expression : LPAREN expression RPAREN' - p[0] = p[2] - - -def p_expression_number(p): - 'expression : NUMBER' - p[0] = p[1] - - -def p_expression_name(p): - 'expression : NAME' - try: - p[0] = names[p[1]] - except LookupError: - print "Undefined name '%s'" % p[1] - p[0] = 0 - - -def p_error(p): - if p: - print "Syntax error at '%s'" % p.value - else: - print "Syntax error at EOF" - -import ply.yacc as yacc -yacc.yacc() - -while 1: - try: - s = raw_input('calc > ') - except EOFError: - break - if not s: - continue - yacc.parse(unicode(s)) diff --git a/xonsh/ply/example/yply/README b/xonsh/ply/example/yply/README deleted file mode 100644 index bfadf36..0000000 --- a/xonsh/ply/example/yply/README +++ /dev/null @@ -1,41 +0,0 @@ -yply.py - -This example implements a program yply.py that converts a UNIX-yacc -specification file into a PLY-compatible program. To use, simply -run it like this: - - % python yply.py [-nocode] inputfile.y >myparser.py - -The output of this program is Python code. In the output, -any C code in the original file is included, but is commented out. -If you use the -nocode option, then all of the C code in the -original file is just discarded. 
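As a hedged aside, the same conversion can be driven from Python rather than
the shell; yply.py and inputfile.y below are the placeholder names from the
command above:

    import subprocess
    import sys

    with open('myparser.py', 'w') as out:
        subprocess.run([sys.executable, 'yply.py', 'inputfile.y'],
                       stdout=out, check=True)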
- -To use the resulting grammer with PLY, you'll need to edit the -myparser.py file. Within this file, some stub code is included that -can be used to test the construction of the parsing tables. However, -you'll need to do more editing to make a workable parser. - -Disclaimer: This just an example I threw together in an afternoon. -It might have some bugs. However, it worked when I tried it on -a yacc-specified C++ parser containing 442 rules and 855 parsing -states. - -Comments: - -1. This example does not parse specification files meant for lex/flex. - You'll need to specify the tokenizer on your own. - -2. This example shows a number of interesting PLY features including - - - Parsing of literal text delimited by nested parentheses - - Some interaction between the parser and the lexer. - - Use of literals in the grammar specification - - One pass compilation. The program just emits the result, - there is no intermediate parse tree. - -3. This program could probably be cleaned up and enhanced a lot. - It would be great if someone wanted to work on this (hint). - --Dave - diff --git a/xonsh/ply/example/yply/ylex.py b/xonsh/ply/example/yply/ylex.py deleted file mode 100644 index 16410e2..0000000 --- a/xonsh/ply/example/yply/ylex.py +++ /dev/null @@ -1,119 +0,0 @@ -# lexer for yacc-grammars -# -# Author: David Beazley (dave@dabeaz.com) -# Date : October 2, 2006 - -import sys -sys.path.append("../..") - -from ply import * - -tokens = ( - 'LITERAL', 'SECTION', 'TOKEN', 'LEFT', 'RIGHT', 'PREC', 'START', 'TYPE', 'NONASSOC', 'UNION', 'CODE', - 'ID', 'QLITERAL', 'NUMBER', -) - -states = (('code', 'exclusive'),) - -literals = [';', ',', '<', '>', '|', ':'] -t_ignore = ' \t' - -t_TOKEN = r'%token' -t_LEFT = r'%left' -t_RIGHT = r'%right' -t_NONASSOC = r'%nonassoc' -t_PREC = r'%prec' -t_START = r'%start' -t_TYPE = r'%type' -t_UNION = r'%union' -t_ID = r'[a-zA-Z_][a-zA-Z_0-9]*' -t_QLITERAL = r'''(?P['"]).*?(?P=quote)''' -t_NUMBER = r'\d+' - - -def t_SECTION(t): - r'%%' - if getattr(t.lexer, "lastsection", 0): - t.value = t.lexer.lexdata[t.lexpos + 2:] - t.lexer.lexpos = len(t.lexer.lexdata) - else: - t.lexer.lastsection = 0 - return t - -# Comments - - -def t_ccomment(t): - r'/\*(.|\n)*?\*/' - t.lexer.lineno += t.value.count('\n') - -t_ignore_cppcomment = r'//.*' - - -def t_LITERAL(t): - r'%\{(.|\n)*?%\}' - t.lexer.lineno += t.value.count("\n") - return t - - -def t_NEWLINE(t): - r'\n' - t.lexer.lineno += 1 - - -def t_code(t): - r'\{' - t.lexer.codestart = t.lexpos - t.lexer.level = 1 - t.lexer.begin('code') - - -def t_code_ignore_string(t): - r'\"([^\\\n]|(\\.))*?\"' - - -def t_code_ignore_char(t): - r'\'([^\\\n]|(\\.))*?\'' - - -def t_code_ignore_comment(t): - r'/\*(.|\n)*?\*/' - - -def t_code_ignore_cppcom(t): - r'//.*' - - -def t_code_lbrace(t): - r'\{' - t.lexer.level += 1 - - -def t_code_rbrace(t): - r'\}' - t.lexer.level -= 1 - if t.lexer.level == 0: - t.type = 'CODE' - t.value = t.lexer.lexdata[t.lexer.codestart:t.lexpos + 1] - t.lexer.begin('INITIAL') - t.lexer.lineno += t.value.count('\n') - return t - -t_code_ignore_nonspace = r'[^\s\}\'\"\{]+' -t_code_ignore_whitespace = r'\s+' -t_code_ignore = "" - - -def t_code_error(t): - raise RuntimeError - - -def t_error(t): - print("%d: Illegal character '%s'" % (t.lexer.lineno, t.value[0])) - print(t.value) - t.lexer.skip(1) - -lex.lex() - -if __name__ == '__main__': - lex.runmain() diff --git a/xonsh/ply/example/yply/yparse.py b/xonsh/ply/example/yply/yparse.py deleted file mode 100644 index 1f2e8d0..0000000 --- a/xonsh/ply/example/yply/yparse.py 
+++ /dev/null @@ -1,244 +0,0 @@ -# parser for Unix yacc-based grammars -# -# Author: David Beazley (dave@dabeaz.com) -# Date : October 2, 2006 - -import ylex -tokens = ylex.tokens - -from ply import * - -tokenlist = [] -preclist = [] - -emit_code = 1 - - -def p_yacc(p): - '''yacc : defsection rulesection''' - - -def p_defsection(p): - '''defsection : definitions SECTION - | SECTION''' - p.lexer.lastsection = 1 - print("tokens = ", repr(tokenlist)) - print() - print("precedence = ", repr(preclist)) - print() - print("# -------------- RULES ----------------") - print() - - -def p_rulesection(p): - '''rulesection : rules SECTION''' - - print("# -------------- RULES END ----------------") - print_code(p[2], 0) - - -def p_definitions(p): - '''definitions : definitions definition - | definition''' - - -def p_definition_literal(p): - '''definition : LITERAL''' - print_code(p[1], 0) - - -def p_definition_start(p): - '''definition : START ID''' - print("start = '%s'" % p[2]) - - -def p_definition_token(p): - '''definition : toktype opttype idlist optsemi ''' - for i in p[3]: - if i[0] not in "'\"": - tokenlist.append(i) - if p[1] == '%left': - preclist.append(('left',) + tuple(p[3])) - elif p[1] == '%right': - preclist.append(('right',) + tuple(p[3])) - elif p[1] == '%nonassoc': - preclist.append(('nonassoc',) + tuple(p[3])) - - -def p_toktype(p): - '''toktype : TOKEN - | LEFT - | RIGHT - | NONASSOC''' - p[0] = p[1] - - -def p_opttype(p): - '''opttype : '<' ID '>' - | empty''' - - -def p_idlist(p): - '''idlist : idlist optcomma tokenid - | tokenid''' - if len(p) == 2: - p[0] = [p[1]] - else: - p[0] = p[1] - p[1].append(p[3]) - - -def p_tokenid(p): - '''tokenid : ID - | ID NUMBER - | QLITERAL - | QLITERAL NUMBER''' - p[0] = p[1] - - -def p_optsemi(p): - '''optsemi : ';' - | empty''' - - -def p_optcomma(p): - '''optcomma : ',' - | empty''' - - -def p_definition_type(p): - '''definition : TYPE '<' ID '>' namelist optsemi''' - # type declarations are ignored - - -def p_namelist(p): - '''namelist : namelist optcomma ID - | ID''' - - -def p_definition_union(p): - '''definition : UNION CODE optsemi''' - # Union declarations are ignored - - -def p_rules(p): - '''rules : rules rule - | rule''' - if len(p) == 2: - rule = p[1] - else: - rule = p[2] - - # Print out a Python equivalent of this rule - - embedded = [] # Embedded actions (a mess) - embed_count = 0 - - rulename = rule[0] - rulecount = 1 - for r in rule[1]: - # r contains one of the rule possibilities - print("def p_%s_%d(p):" % (rulename, rulecount)) - prod = [] - prodcode = "" - for i in range(len(r)): - item = r[i] - if item[0] == '{': # A code block - if i == len(r) - 1: - prodcode = item - break - else: - # an embedded action - embed_name = "_embed%d_%s" % (embed_count, rulename) - prod.append(embed_name) - embedded.append((embed_name, item)) - embed_count += 1 - else: - prod.append(item) - print(" '''%s : %s'''" % (rulename, " ".join(prod))) - # Emit code - print_code(prodcode, 4) - print() - rulecount += 1 - - for e, code in embedded: - print("def p_%s(p):" % e) - print(" '''%s : '''" % e) - print_code(code, 4) - print() - - -def p_rule(p): - '''rule : ID ':' rulelist ';' ''' - p[0] = (p[1], [p[3]]) - - -def p_rule2(p): - '''rule : ID ':' rulelist morerules ';' ''' - p[4].insert(0, p[3]) - p[0] = (p[1], p[4]) - - -def p_rule_empty(p): - '''rule : ID ':' ';' ''' - p[0] = (p[1], [[]]) - - -def p_rule_empty2(p): - '''rule : ID ':' morerules ';' ''' - - p[3].insert(0, []) - p[0] = (p[1], p[3]) - - -def p_morerules(p): - '''morerules : morerules 
'|' rulelist - | '|' rulelist - | '|' ''' - - if len(p) == 2: - p[0] = [[]] - elif len(p) == 3: - p[0] = [p[2]] - else: - p[0] = p[1] - p[0].append(p[3]) - -# print("morerules", len(p), p[0]) - - -def p_rulelist(p): - '''rulelist : rulelist ruleitem - | ruleitem''' - - if len(p) == 2: - p[0] = [p[1]] - else: - p[0] = p[1] - p[1].append(p[2]) - - -def p_ruleitem(p): - '''ruleitem : ID - | QLITERAL - | CODE - | PREC''' - p[0] = p[1] - - -def p_empty(p): - '''empty : ''' - - -def p_error(p): - pass - -yacc.yacc(debug=0) - - -def print_code(code, indent): - if not emit_code: - return - codelines = code.splitlines() - for c in codelines: - print("%s# %s" % (" " * indent, c)) diff --git a/xonsh/ply/example/yply/yply.py b/xonsh/ply/example/yply/yply.py deleted file mode 100755 index e24616c..0000000 --- a/xonsh/ply/example/yply/yply.py +++ /dev/null @@ -1,51 +0,0 @@ -#!/usr/local/bin/python -# yply.py -# -# Author: David Beazley (dave@dabeaz.com) -# Date : October 2, 2006 -# -# Converts a UNIX-yacc specification file into a PLY-compatible -# specification. To use, simply do this: -# -# % python yply.py [-nocode] inputfile.y >myparser.py -# -# The output of this program is Python code. In the output, -# any C code in the original file is included, but is commented. -# If you use the -nocode option, then all of the C code in the -# original file is discarded. -# -# Disclaimer: This just an example I threw together in an afternoon. -# It might have some bugs. However, it worked when I tried it on -# a yacc-specified C++ parser containing 442 rules and 855 parsing -# states. -# - -import sys -sys.path.insert(0, "../..") - -import ylex -import yparse - -from ply import * - -if len(sys.argv) == 1: - print("usage : yply.py [-nocode] inputfile") - raise SystemExit - -if len(sys.argv) == 3: - if sys.argv[1] == '-nocode': - yparse.emit_code = 0 - else: - print("Unknown option '%s'" % sys.argv[1]) - raise SystemExit - filename = sys.argv[2] -else: - filename = sys.argv[1] - -yacc.parse(open(filename).read()) - -print(""" -if __name__ == '__main__': - from ply import * - yacc.yacc() -""") diff --git a/xonsh/ply/ply/__init__.py b/xonsh/ply/ply/__init__.py deleted file mode 100644 index 23707c6..0000000 --- a/xonsh/ply/ply/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# PLY package -# Author: David Beazley (dave@dabeaz.com) - -__version__ = '3.11' -__all__ = ['lex','yacc'] diff --git a/xonsh/ply/ply/cpp.py b/xonsh/ply/ply/cpp.py deleted file mode 100644 index 50a44a1..0000000 --- a/xonsh/ply/ply/cpp.py +++ /dev/null @@ -1,974 +0,0 @@ -# ----------------------------------------------------------------------------- -# ply: cpp.py -# -# Copyright (C) 2001-2019 -# David M. Beazley (Dabeaz LLC) -# All rights reserved. -# -# Latest version: https://github.com/dabeaz/ply -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright notice, -# this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# * Neither the name of David Beazley or Dabeaz LLC may be used to -# endorse or promote products derived from this software without -# specific prior written permission. 
-# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# ----------------------------------------------------------------------------- - -# This module implements an ANSI-C style lexical preprocessor for PLY. -# ----------------------------------------------------------------------------- -from __future__ import generators - -import sys - -# Some Python 3 compatibility shims -if sys.version_info.major < 3: - STRING_TYPES = (str, unicode) -else: - STRING_TYPES = str - xrange = range - -# ----------------------------------------------------------------------------- -# Default preprocessor lexer definitions. These tokens are enough to get -# a basic preprocessor working. Other modules may import these if they want -# ----------------------------------------------------------------------------- - -tokens = ( - 'CPP_ID','CPP_INTEGER', 'CPP_FLOAT', 'CPP_STRING', 'CPP_CHAR', 'CPP_WS', 'CPP_COMMENT1', 'CPP_COMMENT2', 'CPP_POUND','CPP_DPOUND' -) - -literals = "+-*/%|&~^<>=!?()[]{}.,;:\\\'\"" - -# Whitespace -def t_CPP_WS(t): - r'\s+' - t.lexer.lineno += t.value.count("\n") - return t - -t_CPP_POUND = r'\#' -t_CPP_DPOUND = r'\#\#' - -# Identifier -t_CPP_ID = r'[A-Za-z_][\w_]*' - -# Integer literal -def CPP_INTEGER(t): - r'(((((0x)|(0X))[0-9a-fA-F]+)|(\d+))([uU][lL]|[lL][uU]|[uU]|[lL])?)' - return t - -t_CPP_INTEGER = CPP_INTEGER - -# Floating literal -t_CPP_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?' - -# String literal -def t_CPP_STRING(t): - r'\"([^\\\n]|(\\(.|\n)))*?\"' - t.lexer.lineno += t.value.count("\n") - return t - -# Character constant 'c' or L'c' -def t_CPP_CHAR(t): - r'(L)?\'([^\\\n]|(\\(.|\n)))*?\'' - t.lexer.lineno += t.value.count("\n") - return t - -# Comment -def t_CPP_COMMENT1(t): - r'(/\*(.|\n)*?\*/)' - ncr = t.value.count("\n") - t.lexer.lineno += ncr - # replace with one space or a number of '\n' - t.type = 'CPP_WS'; t.value = '\n' * ncr if ncr else ' ' - return t - -# Line comment -def t_CPP_COMMENT2(t): - r'(//.*?(\n|$))' - # replace with '/n' - t.type = 'CPP_WS'; t.value = '\n' - return t - -def t_error(t): - t.type = t.value[0] - t.value = t.value[0] - t.lexer.skip(1) - return t - -import re -import copy -import time -import os.path - -# ----------------------------------------------------------------------------- -# trigraph() -# -# Given an input string, this function replaces all trigraph sequences. -# The following mapping is used: -# -# ??= # -# ??/ \ -# ??' ^ -# ??( [ -# ??) ] -# ??! 
| -# ??< { -# ??> } -# ??- ~ -# ----------------------------------------------------------------------------- - -_trigraph_pat = re.compile(r'''\?\?[=/\'\(\)\!<>\-]''') -_trigraph_rep = { - '=':'#', - '/':'\\', - "'":'^', - '(':'[', - ')':']', - '!':'|', - '<':'{', - '>':'}', - '-':'~' -} - -def trigraph(input): - return _trigraph_pat.sub(lambda g: _trigraph_rep[g.group()[-1]],input) - -# ------------------------------------------------------------------ -# Macro object -# -# This object holds information about preprocessor macros -# -# .name - Macro name (string) -# .value - Macro value (a list of tokens) -# .arglist - List of argument names -# .variadic - Boolean indicating whether or not variadic macro -# .vararg - Name of the variadic parameter -# -# When a macro is created, the macro replacement token sequence is -# pre-scanned and used to create patch lists that are later used -# during macro expansion -# ------------------------------------------------------------------ - -class Macro(object): - def __init__(self,name,value,arglist=None,variadic=False): - self.name = name - self.value = value - self.arglist = arglist - self.variadic = variadic - if variadic: - self.vararg = arglist[-1] - self.source = None - -# ------------------------------------------------------------------ -# Preprocessor object -# -# Object representing a preprocessor. Contains macro definitions, -# include directories, and other information -# ------------------------------------------------------------------ - -class Preprocessor(object): - def __init__(self,lexer=None): - if lexer is None: - lexer = lex.lexer - self.lexer = lexer - self.macros = { } - self.path = [] - self.temp_path = [] - - # Probe the lexer for selected tokens - self.lexprobe() - - tm = time.localtime() - self.define("__DATE__ \"%s\"" % time.strftime("%b %d %Y",tm)) - self.define("__TIME__ \"%s\"" % time.strftime("%H:%M:%S",tm)) - self.parser = None - - # ----------------------------------------------------------------------------- - # tokenize() - # - # Utility function. Given a string of text, tokenize into a list of tokens - # ----------------------------------------------------------------------------- - - def tokenize(self,text): - tokens = [] - self.lexer.input(text) - while True: - tok = self.lexer.token() - if not tok: break - tokens.append(tok) - return tokens - - # --------------------------------------------------------------------- - # error() - # - # Report a preprocessor error/warning of some kind - # ---------------------------------------------------------------------- - - def error(self,file,line,msg): - print("%s:%d %s" % (file,line,msg)) - - # ---------------------------------------------------------------------- - # lexprobe() - # - # This method probes the preprocessor lexer object to discover - # the token types of symbols that are important to the preprocessor. - # If this works right, the preprocessor will simply "work" - # with any suitable lexer regardless of how tokens have been named. 
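# -----------------------------------------------------------------------------
# [Aside.] Hedged usage sketch for the module-level trigraph() helper defined
# above. The import path is an assumption; since xonsh vendors this file, it
# may instead be importable as xonsh.ply.ply.cpp.

from ply.cpp import trigraph

print(trigraph('??=include <stdio.h>'))   # -> #include <stdio.h>
print(trigraph('a ??! b'))                # -> a | b
# -----------------------------------------------------------------------------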
- # ---------------------------------------------------------------------- - - def lexprobe(self): - - # Determine the token type for identifiers - self.lexer.input("identifier") - tok = self.lexer.token() - if not tok or tok.value != "identifier": - print("Couldn't determine identifier type") - else: - self.t_ID = tok.type - - # Determine the token type for integers - self.lexer.input("12345") - tok = self.lexer.token() - if not tok or int(tok.value) != 12345: - print("Couldn't determine integer type") - else: - self.t_INTEGER = tok.type - self.t_INTEGER_TYPE = type(tok.value) - - # Determine the token type for strings enclosed in double quotes - self.lexer.input("\"filename\"") - tok = self.lexer.token() - if not tok or tok.value != "\"filename\"": - print("Couldn't determine string type") - else: - self.t_STRING = tok.type - - # Determine the token type for whitespace--if any - self.lexer.input(" ") - tok = self.lexer.token() - if not tok or tok.value != " ": - self.t_SPACE = None - else: - self.t_SPACE = tok.type - - # Determine the token type for newlines - self.lexer.input("\n") - tok = self.lexer.token() - if not tok or tok.value != "\n": - self.t_NEWLINE = None - print("Couldn't determine token for newlines") - else: - self.t_NEWLINE = tok.type - - self.t_WS = (self.t_SPACE, self.t_NEWLINE) - - # Check for other characters used by the preprocessor - chars = [ '<','>','#','##','\\','(',')',',','.'] - for c in chars: - self.lexer.input(c) - tok = self.lexer.token() - if not tok or tok.value != c: - print("Unable to lex '%s' required for preprocessor" % c) - - # ---------------------------------------------------------------------- - # add_path() - # - # Adds a search path to the preprocessor. - # ---------------------------------------------------------------------- - - def add_path(self,path): - self.path.append(path) - - # ---------------------------------------------------------------------- - # group_lines() - # - # Given an input string, this function splits it into lines. Trailing whitespace - # is removed. Any line ending with \ is grouped with the next line. This - # function forms the lowest level of the preprocessor---grouping into text into - # a line-by-line format. - # ---------------------------------------------------------------------- - - def group_lines(self,input): - lex = self.lexer.clone() - lines = [x.rstrip() for x in input.splitlines()] - for i in xrange(len(lines)): - j = i+1 - while lines[i].endswith('\\') and (j < len(lines)): - lines[i] = lines[i][:-1]+lines[j] - lines[j] = "" - j += 1 - - input = "\n".join(lines) - lex.input(input) - lex.lineno = 1 - - current_line = [] - while True: - tok = lex.token() - if not tok: - break - current_line.append(tok) - if tok.type in self.t_WS and '\n' in tok.value: - yield current_line - current_line = [] - - if current_line: - yield current_line - - # ---------------------------------------------------------------------- - # tokenstrip() - # - # Remove leading/trailing whitespace tokens from a token list - # ---------------------------------------------------------------------- - - def tokenstrip(self,tokens): - i = 0 - while i < len(tokens) and tokens[i].type in self.t_WS: - i += 1 - del tokens[:i] - i = len(tokens)-1 - while i >= 0 and tokens[i].type in self.t_WS: - i -= 1 - del tokens[i+1:] - return tokens - - - # ---------------------------------------------------------------------- - # collect_args() - # - # Collects comma separated arguments from a list of tokens. 
The arguments - # must be enclosed in parenthesis. Returns a tuple (tokencount,args,positions) - # where tokencount is the number of tokens consumed, args is a list of arguments, - # and positions is a list of integers containing the starting index of each - # argument. Each argument is represented by a list of tokens. - # - # When collecting arguments, leading and trailing whitespace is removed - # from each argument. - # - # This function properly handles nested parenthesis and commas---these do not - # define new arguments. - # ---------------------------------------------------------------------- - - def collect_args(self,tokenlist): - args = [] - positions = [] - current_arg = [] - nesting = 1 - tokenlen = len(tokenlist) - - # Search for the opening '('. - i = 0 - while (i < tokenlen) and (tokenlist[i].type in self.t_WS): - i += 1 - - if (i < tokenlen) and (tokenlist[i].value == '('): - positions.append(i+1) - else: - self.error(self.source,tokenlist[0].lineno,"Missing '(' in macro arguments") - return 0, [], [] - - i += 1 - - while i < tokenlen: - t = tokenlist[i] - if t.value == '(': - current_arg.append(t) - nesting += 1 - elif t.value == ')': - nesting -= 1 - if nesting == 0: - if current_arg: - args.append(self.tokenstrip(current_arg)) - positions.append(i) - return i+1,args,positions - current_arg.append(t) - elif t.value == ',' and nesting == 1: - args.append(self.tokenstrip(current_arg)) - positions.append(i+1) - current_arg = [] - else: - current_arg.append(t) - i += 1 - - # Missing end argument - self.error(self.source,tokenlist[-1].lineno,"Missing ')' in macro arguments") - return 0, [],[] - - # ---------------------------------------------------------------------- - # macro_prescan() - # - # Examine the macro value (token sequence) and identify patch points - # This is used to speed up macro expansion later on---we'll know - # right away where to apply patches to the value to form the expansion - # ---------------------------------------------------------------------- - - def macro_prescan(self,macro): - macro.patch = [] # Standard macro arguments - macro.str_patch = [] # String conversion expansion - macro.var_comma_patch = [] # Variadic macro comma patch - i = 0 - while i < len(macro.value): - if macro.value[i].type == self.t_ID and macro.value[i].value in macro.arglist: - argnum = macro.arglist.index(macro.value[i].value) - # Conversion of argument to a string - if i > 0 and macro.value[i-1].value == '#': - macro.value[i] = copy.copy(macro.value[i]) - macro.value[i].type = self.t_STRING - del macro.value[i-1] - macro.str_patch.append((argnum,i-1)) - continue - # Concatenation - elif (i > 0 and macro.value[i-1].value == '##'): - macro.patch.append(('c',argnum,i-1)) - del macro.value[i-1] - i -= 1 - continue - elif ((i+1) < len(macro.value) and macro.value[i+1].value == '##'): - macro.patch.append(('c',argnum,i)) - del macro.value[i + 1] - continue - # Standard expansion - else: - macro.patch.append(('e',argnum,i)) - elif macro.value[i].value == '##': - if macro.variadic and (i > 0) and (macro.value[i-1].value == ',') and \ - ((i+1) < len(macro.value)) and (macro.value[i+1].type == self.t_ID) and \ - (macro.value[i+1].value == macro.vararg): - macro.var_comma_patch.append(i-1) - i += 1 - macro.patch.sort(key=lambda x: x[2],reverse=True) - - # ---------------------------------------------------------------------- - # macro_expand_args() - # - # Given a Macro and list of arguments (each a token list), this method - # returns an expanded version of a macro. 
The return value is a token sequence - # representing the replacement macro tokens - # ---------------------------------------------------------------------- - - def macro_expand_args(self,macro,args,expanded): - # Make a copy of the macro token sequence - rep = [copy.copy(_x) for _x in macro.value] - - # Make string expansion patches. These do not alter the length of the replacement sequence - - str_expansion = {} - for argnum, i in macro.str_patch: - if argnum not in str_expansion: - str_expansion[argnum] = ('"%s"' % "".join([x.value for x in args[argnum]])).replace("\\","\\\\") - rep[i] = copy.copy(rep[i]) - rep[i].value = str_expansion[argnum] - - # Make the variadic macro comma patch. If the variadic macro argument is empty, we get rid - comma_patch = False - if macro.variadic and not args[-1]: - for i in macro.var_comma_patch: - rep[i] = None - comma_patch = True - - # Make all other patches. The order of these matters. It is assumed that the patch list - # has been sorted in reverse order of patch location since replacements will cause the - # size of the replacement sequence to expand from the patch point. - - expanded_args = { } - for ptype, argnum, i in macro.patch: - # Concatenation. Argument is left unexpanded - if ptype == 'c': - rep[i:i+1] = args[argnum] - # Normal expansion. Argument is macro expanded first - elif ptype == 'e': - if argnum not in expanded_args: - expanded_args[argnum] = self.expand_macros(args[argnum],expanded) - rep[i:i+1] = expanded_args[argnum] - - # Get rid of removed comma if necessary - if comma_patch: - rep = [_i for _i in rep if _i] - - return rep - - - # ---------------------------------------------------------------------- - # expand_macros() - # - # Given a list of tokens, this function performs macro expansion. - # The expanded argument is a dictionary that contains macros already - # expanded. This is used to prevent infinite recursion. 
- # ---------------------------------------------------------------------- - - def expand_macros(self,tokens,expanded=None): - if expanded is None: - expanded = {} - i = 0 - while i < len(tokens): - t = tokens[i] - if t.type == self.t_ID: - if t.value in self.macros and t.value not in expanded: - # Yes, we found a macro match - expanded[t.value] = True - - m = self.macros[t.value] - if not m.arglist: - # A simple macro - ex = self.expand_macros([copy.copy(_x) for _x in m.value],expanded) - for e in ex: - e.lineno = t.lineno - tokens[i:i+1] = ex - i += len(ex) - else: - # A macro with arguments - j = i + 1 - while j < len(tokens) and tokens[j].type in self.t_WS: - j += 1 - if j < len(tokens) and tokens[j].value == '(': - tokcount,args,positions = self.collect_args(tokens[j:]) - if not m.variadic and len(args) != len(m.arglist): - self.error(self.source,t.lineno,"Macro %s requires %d arguments" % (t.value,len(m.arglist))) - i = j + tokcount - elif m.variadic and len(args) < len(m.arglist)-1: - if len(m.arglist) > 2: - self.error(self.source,t.lineno,"Macro %s must have at least %d arguments" % (t.value, len(m.arglist)-1)) - else: - self.error(self.source,t.lineno,"Macro %s must have at least %d argument" % (t.value, len(m.arglist)-1)) - i = j + tokcount - else: - if m.variadic: - if len(args) == len(m.arglist)-1: - args.append([]) - else: - args[len(m.arglist)-1] = tokens[j+positions[len(m.arglist)-1]:j+tokcount-1] - del args[len(m.arglist):] - - # Get macro replacement text - rep = self.macro_expand_args(m,args,expanded) - rep = self.expand_macros(rep,expanded) - for r in rep: - r.lineno = t.lineno - tokens[i:j+tokcount] = rep - i += len(rep) - else: - # This is not a macro. It is just a word which - # equals to name of the macro. Hence, go to the - # next token. - i += 1 - - del expanded[t.value] - continue - elif t.value == '__LINE__': - t.type = self.t_INTEGER - t.value = self.t_INTEGER_TYPE(t.lineno) - - i += 1 - return tokens - - # ---------------------------------------------------------------------- - # evalexpr() - # - # Evaluate an expression token sequence for the purposes of evaluating - # integral expressions. - # ---------------------------------------------------------------------- - - def evalexpr(self,tokens): - # tokens = tokenize(line) - # Search for defined macros - i = 0 - while i < len(tokens): - if tokens[i].type == self.t_ID and tokens[i].value == 'defined': - j = i + 1 - needparen = False - result = "0L" - while j < len(tokens): - if tokens[j].type in self.t_WS: - j += 1 - continue - elif tokens[j].type == self.t_ID: - if tokens[j].value in self.macros: - result = "1L" - else: - result = "0L" - if not needparen: break - elif tokens[j].value == '(': - needparen = True - elif tokens[j].value == ')': - break - else: - self.error(self.source,tokens[i].lineno,"Malformed defined()") - j += 1 - tokens[i].type = self.t_INTEGER - tokens[i].value = self.t_INTEGER_TYPE(result) - del tokens[i+1:j+1] - i += 1 - tokens = self.expand_macros(tokens) - return self.evalexpr_expanded(tokens) - - # ---------------------------------------------------------------------- - # evalexpr_expanded() - # - # Helper for evalexpr that evaluates the expression that had its macros - # and defined(...) 
expressions expanded by evalexpr - # ---------------------------------------------------------------------- - - def evalexpr_expanded(self, tokens): - for i,t in enumerate(tokens): - if t.type == self.t_ID: - tokens[i] = copy.copy(t) - tokens[i].type = self.t_INTEGER - tokens[i].value = self.t_INTEGER_TYPE("0") - elif t.type == self.t_INTEGER: - tokens[i] = copy.copy(t) - # Strip off any trailing suffixes - tokens[i].value = str(tokens[i].value) - while tokens[i].value[-1] not in "0123456789abcdefABCDEF": - tokens[i].value = tokens[i].value[:-1] - - return self.evalexpr_string("".join([str(x.value) for x in tokens])) - - # ---------------------------------------------------------------------- - # evalexpr_string() - # - # Helper for evalexpr that evaluates a string expression - # This implementation does basic C->python conversion and then uses eval() - # ---------------------------------------------------------------------- - def evalexpr_string(self, expr): - expr = expr.replace("&&"," and ") - expr = expr.replace("||"," or ") - expr = expr.replace("!"," not ") - expr = expr.replace(" not ="," !=") - try: - result = eval(expr) - except Exception: - # No token context is available in this helper ('tokens' is not in - # scope here), so no line number can be reported - self.error(self.source,0,"Couldn't evaluate expression") - result = 0 - return result - - # ---------------------------------------------------------------------- - # parsegen() - # - # Parse an input string. - # ---------------------------------------------------------------------- - def parsegen(self,input,source=None): - - # Replace trigraph sequences - t = trigraph(input) - lines = self.group_lines(t) - - if not source: - source = "" - - self.define("__FILE__ \"%s\"" % source) - - self.source = source - chunk = [] - enable = True - iftrigger = False - ifstack = [] - - for x in lines: - for i,tok in enumerate(x): - if tok.type not in self.t_WS: break - if tok.value == '#': - # Preprocessor directive - - # insert necessary whitespace instead of eaten tokens - for tok in x: - if tok.type in self.t_WS and '\n' in tok.value: - chunk.append(tok) - - dirtokens = self.tokenstrip(x[i+1:]) - if dirtokens: - name = dirtokens[0].value - args = self.tokenstrip(dirtokens[1:]) - else: - name = "" - args = [] - - if name == 'define': - if enable: - for tok in self.expand_macros(chunk): - yield tok - chunk = [] - self.define(args) - elif name == 'include': - if enable: - for tok in self.expand_macros(chunk): - yield tok - chunk = [] - oldfile = self.macros['__FILE__'] - for tok in self.include(args): - yield tok - self.macros['__FILE__'] = oldfile - self.source = source - elif name == 'undef': - if enable: - for tok in self.expand_macros(chunk): - yield tok - chunk = [] - self.undef(args) - elif name == 'ifdef': - ifstack.append((enable,iftrigger)) - if enable: - if not args[0].value in self.macros: - enable = False - iftrigger = False - else: - iftrigger = True - elif name == 'ifndef': - ifstack.append((enable,iftrigger)) - if enable: - if args[0].value in self.macros: - enable = False - iftrigger = False - else: - iftrigger = True - elif name == 'if': - ifstack.append((enable,iftrigger)) - if enable: - result = self.evalexpr(args) - if not result: - enable = False - iftrigger = False - else: - iftrigger = True - elif name == 'elif': - if ifstack: - if ifstack[-1][0]: # We only pay attention if outer "if" allows this - if enable: # If already true, we flip enable False - enable = False - elif not iftrigger: # If False, but not triggered yet, we'll check expression - result = self.evalexpr(args) - if result: - enable = True - iftrigger =
True - else: - self.error(self.source,dirtokens[0].lineno,"Misplaced #elif") - - elif name == 'else': - if ifstack: - if ifstack[-1][0]: - if enable: - enable = False - elif not iftrigger: - enable = True - iftrigger = True - else: - self.error(self.source,dirtokens[0].lineno,"Misplaced #else") - - elif name == 'endif': - if ifstack: - enable,iftrigger = ifstack.pop() - else: - self.error(self.source,dirtokens[0].lineno,"Misplaced #endif") - else: - # Unknown preprocessor directive - pass - - else: - # Normal text - if enable: - chunk.extend(x) - - for tok in self.expand_macros(chunk): - yield tok - chunk = [] - - # ---------------------------------------------------------------------- - # include() - # - # Implementation of file-inclusion - # ---------------------------------------------------------------------- - - def include(self,tokens): - # Try to extract the filename and then process an include file - if not tokens: - return - if tokens: - if tokens[0].value != '<' and tokens[0].type != self.t_STRING: - tokens = self.expand_macros(tokens) - - if tokens[0].value == '<': - # Include <...> - i = 1 - while i < len(tokens): - if tokens[i].value == '>': - break - i += 1 - else: - print("Malformed #include <...>") - return - filename = "".join([x.value for x in tokens[1:i]]) - path = self.path + [""] + self.temp_path - elif tokens[0].type == self.t_STRING: - filename = tokens[0].value[1:-1] - path = self.temp_path + [""] + self.path - else: - print("Malformed #include statement") - return - for p in path: - iname = os.path.join(p,filename) - try: - data = self.read_include_file(iname) - dname = os.path.dirname(iname) - if dname: - self.temp_path.insert(0,dname) - for tok in self.parsegen(data,filename): - yield tok - if dname: - del self.temp_path[0] - break - except IOError: - pass - else: - print("Couldn't find '%s'" % filename) - - # ---------------------------------------------------------------------- - # read_include_file() - # - # Reads a source file for inclusion using #include - # Could be overridden to e.g. customize encoding, limit access to - # certain paths on the filesystem, or provide the contents of system - # include files - # ---------------------------------------------------------------------- - - def read_include_file(self, filepath): - with open(filepath, 'r', encoding='utf-8', errors='surrogateescape') as file: - return file.read() - - # ---------------------------------------------------------------------- - # define() - # - # Define a new macro - # ---------------------------------------------------------------------- - - def define(self,tokens): - if isinstance(tokens,STRING_TYPES): - tokens = self.tokenize(tokens) - - linetok = tokens - try: - name = linetok[0] - if len(linetok) > 1: - mtype = linetok[1] - else: - mtype = None - if not mtype: - m = Macro(name.value,[]) - self.macros[name.value] = m - elif mtype.type in self.t_WS: - # A normal macro - m = Macro(name.value,self.tokenstrip(linetok[2:])) - self.macros[name.value] = m - elif mtype.value == '(': - # A macro with arguments - tokcount, args, positions = self.collect_args(linetok[1:]) - variadic = False - for a in args: - if variadic: - print("No more arguments may follow a variadic argument") - break - astr = "".join([str(_i.value) for _i in a]) - if astr == "...": - variadic = True - a[0].type = self.t_ID - a[0].value = '__VA_ARGS__' - variadic = True - del a[1:] - continue - elif astr[-3:] == "..." and a[0].type == self.t_ID: - variadic = True - del a[1:] - # If, for some reason, "." 
is part of the identifier, strip off the name for the purposes - # of macro expansion - if a[0].value[-3:] == '...': - a[0].value = a[0].value[:-3] - continue - if len(a) > 1 or a[0].type != self.t_ID: - print("Invalid macro argument") - break - else: - mvalue = self.tokenstrip(linetok[1+tokcount:]) - i = 0 - while i < len(mvalue): - if i+1 < len(mvalue): - if mvalue[i].type in self.t_WS and mvalue[i+1].value == '##': - del mvalue[i] - continue - elif mvalue[i].value == '##' and mvalue[i+1].type in self.t_WS: - del mvalue[i+1] - i += 1 - m = Macro(name.value,mvalue,[x[0].value for x in args],variadic) - self.macro_prescan(m) - self.macros[name.value] = m - else: - print("Bad macro definition") - except LookupError: - print("Bad macro definition") - - # ---------------------------------------------------------------------- - # undef() - # - # Undefine a macro - # ---------------------------------------------------------------------- - - def undef(self,tokens): - id = tokens[0].value - try: - del self.macros[id] - except LookupError: - pass - - # ---------------------------------------------------------------------- - # parse() - # - # Parse input text. - # ---------------------------------------------------------------------- - def parse(self,input,source=None,ignore={}): - self.ignore = ignore - self.parser = self.parsegen(input,source) - - # ---------------------------------------------------------------------- - # token() - # - # Method to return individual tokens - # ---------------------------------------------------------------------- - def token(self): - try: - while True: - tok = next(self.parser) - if tok.type not in self.ignore: return tok - except StopIteration: - self.parser = None - return None - -if __name__ == '__main__': - import ply.lex as lex - lexer = lex.lex() - - # Run a preprocessor - import sys - with open(sys.argv[1]) as f: - input = f.read() - - p = Preprocessor(lexer) - p.parse(input,sys.argv[1]) - while True: - tok = p.token() - if not tok: break - print(p.source, tok) diff --git a/xonsh/ply/ply/ctokens.py b/xonsh/ply/ply/ctokens.py deleted file mode 100644 index b265e59..0000000 --- a/xonsh/ply/ply/ctokens.py +++ /dev/null @@ -1,127 +0,0 @@ -# ---------------------------------------------------------------------- -# ctokens.py -# -# Token specifications for symbols in ANSI C and C++. This file is -# meant to be used as a library in other tokenizers. -# ---------------------------------------------------------------------- - -# Reserved words - -tokens = [ - # Literals (identifier, integer constant, float constant, string constant, char const) - 'ID', 'TYPEID', 'INTEGER', 'FLOAT', 'STRING', 'CHARACTER', - - # Operators (+,-,*,/,%,|,&,~,^,<<,>>, ||, &&, !, <, <=, >, >=, ==, !=) - 'PLUS', 'MINUS', 'TIMES', 'DIVIDE', 'MODULO', - 'OR', 'AND', 'NOT', 'XOR', 'LSHIFT', 'RSHIFT', - 'LOR', 'LAND', 'LNOT', - 'LT', 'LE', 'GT', 'GE', 'EQ', 'NE', - - # Assignment (=, *=, /=, %=, +=, -=, <<=, >>=, &=, ^=, |=) - 'EQUALS', 'TIMESEQUAL', 'DIVEQUAL', 'MODEQUAL', 'PLUSEQUAL', 'MINUSEQUAL', - 'LSHIFTEQUAL','RSHIFTEQUAL', 'ANDEQUAL', 'XOREQUAL', 'OREQUAL', - - # Increment/decrement (++,--) - 'INCREMENT', 'DECREMENT', - - # Structure dereference (->) - 'ARROW', - - # Ternary operator (?) - 'TERNARY', - - # Delimiters ( ) [ ] { } , . ; : - 'LPAREN', 'RPAREN', - 'LBRACKET', 'RBRACKET', - 'LBRACE', 'RBRACE', - 'COMMA', 'PERIOD', 'SEMI', 'COLON', - - # Ellipsis (...)
- 'ELLIPSIS', -] - -# Operators -t_PLUS = r'\+' -t_MINUS = r'-' -t_TIMES = r'\*' -t_DIVIDE = r'/' -t_MODULO = r'%' -t_OR = r'\|' -t_AND = r'&' -t_NOT = r'~' -t_XOR = r'\^' -t_LSHIFT = r'<<' -t_RSHIFT = r'>>' -t_LOR = r'\|\|' -t_LAND = r'&&' -t_LNOT = r'!' -t_LT = r'<' -t_GT = r'>' -t_LE = r'<=' -t_GE = r'>=' -t_EQ = r'==' -t_NE = r'!=' - -# Assignment operators - -t_EQUALS = r'=' -t_TIMESEQUAL = r'\*=' -t_DIVEQUAL = r'/=' -t_MODEQUAL = r'%=' -t_PLUSEQUAL = r'\+=' -t_MINUSEQUAL = r'-=' -t_LSHIFTEQUAL = r'<<=' -t_RSHIFTEQUAL = r'>>=' -t_ANDEQUAL = r'&=' -t_OREQUAL = r'\|=' -t_XOREQUAL = r'\^=' - -# Increment/decrement -t_INCREMENT = r'\+\+' -t_DECREMENT = r'--' - -# -> -t_ARROW = r'->' - -# ? -t_TERNARY = r'\?' - -# Delimiters -t_LPAREN = r'\(' -t_RPAREN = r'\)' -t_LBRACKET = r'\[' -t_RBRACKET = r'\]' -t_LBRACE = r'\{' -t_RBRACE = r'\}' -t_COMMA = r',' -t_PERIOD = r'\.' -t_SEMI = r';' -t_COLON = r':' -t_ELLIPSIS = r'\.\.\.' - -# Identifiers -t_ID = r'[A-Za-z_][A-Za-z0-9_]*' - -# Integer literal -t_INTEGER = r'\d+([uU]|[lL]|[uU][lL]|[lL][uU])?' - -# Floating literal -t_FLOAT = r'((\d+)(\.\d+)(e(\+|-)?(\d+))? | (\d+)e(\+|-)?(\d+))([lL]|[fF])?' - -# String literal -t_STRING = r'\"([^\\\n]|(\\.))*?\"' - -# Character constant 'c' or L'c' -t_CHARACTER = r'(L)?\'([^\\\n]|(\\.))*?\'' - -# Comment (C-Style) -def t_COMMENT(t): - r'/\*(.|\n)*?\*/' - t.lexer.lineno += t.value.count('\n') - return t - -# Comment (C++-Style) -def t_CPPCOMMENT(t): - r'//.*\n' - t.lexer.lineno += 1 - return t diff --git a/xonsh/ply/ply/lex.py b/xonsh/ply/ply/lex.py deleted file mode 100644 index bc9ed34..0000000 --- a/xonsh/ply/ply/lex.py +++ /dev/null @@ -1,1099 +0,0 @@ -# ----------------------------------------------------------------------------- -# ply: lex.py -# -# Copyright (C) 2001-2019 -# David M. Beazley (Dabeaz LLC) -# All rights reserved. -# -# Latest version: https://github.com/dabeaz/ply -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright notice, -# this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# * Neither the name of David Beazley or Dabeaz LLC may be used to -# endorse or promote products derived from this software without -# specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
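The ctokens.py module above is a rule library rather than a complete lexer: a host module is expected to pull its tokens list and t_* rules into its own namespace and then build a lexer from them. A hedged sketch of that usage (the host module, its t_ignore and t_error rules, and the sample input are assumptions, not part of this diff):

    # Hypothetical host module that reuses the ctokens rule library.
    from ctokens import *        # brings in tokens, t_PLUS, ..., t_COMMENT
    import ply.lex as lex

    t_ignore = ' \t'             # whitespace handling is left to the host

    def t_error(t):
        print("Illegal character %r" % t.value[0])
        t.lexer.skip(1)

    lexer = lex.lex()            # builds the master regex from module globals
    lexer.input('x = y + 42;')
    for tok in lexer:
        print(tok.type, tok.value)   # ID x / EQUALS = / ID y / PLUS + / ...

One caveat: t_COMMENT and t_CPPCOMMENT return token types that are not in the tokens list, so a host module that will see commented input should either add 'COMMENT' and 'CPPCOMMENT' to tokens or drop those return statements.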
-# ----------------------------------------------------------------------------- - -__version__ = '3.11' -__tabversion__ = '3.10' - -import re -import sys -import types -import copy -import os -import inspect - -# This tuple contains known string types -try: - # Python 2.6 - StringTypes = (types.StringType, types.UnicodeType) -except AttributeError: - # Python 3.0 - StringTypes = (str, bytes) - -# This regular expression is used to match valid token names -_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$') - -# Exception thrown when invalid token encountered and no default error -# handler is defined. -class LexError(Exception): - def __init__(self, message, s): - self.args = (message,) - self.text = s - - -# Token class. This class is used to represent the tokens produced. -class LexToken(object): - def __str__(self): - return 'LexToken(%s,%r,%d,%d)' % (self.type, self.value, self.lineno, self.lexpos) - - def __repr__(self): - return str(self) - - -# This object is a stand-in for a logging object created by the -# logging module. - -class PlyLogger(object): - def __init__(self, f): - self.f = f - - def critical(self, msg, *args, **kwargs): - self.f.write((msg % args) + '\n') - - def warning(self, msg, *args, **kwargs): - self.f.write('WARNING: ' + (msg % args) + '\n') - - def error(self, msg, *args, **kwargs): - self.f.write('ERROR: ' + (msg % args) + '\n') - - info = critical - debug = critical - - -# Null logger is used when no output is generated. Does nothing. -class NullLogger(object): - def __getattribute__(self, name): - return self - - def __call__(self, *args, **kwargs): - return self - - -# ----------------------------------------------------------------------------- -# === Lexing Engine === -# -# The following Lexer class implements the lexer runtime. There are only -# a few public methods and attributes: -# -# input() - Store a new string in the lexer -# token() - Get the next token -# clone() - Clone the lexer -# -# lineno - Current line number -# lexpos - Current position in the input string -# ----------------------------------------------------------------------------- - -class Lexer: - def __init__(self): - self.lexre = None # Master regular expression. 
This is a list of - # tuples (re, findex) where re is a compiled - # regular expression and findex is a list - # mapping regex group numbers to rules - self.lexretext = None # Current regular expression strings - self.lexstatere = {} # Dictionary mapping lexer states to master regexs - self.lexstateretext = {} # Dictionary mapping lexer states to regex strings - self.lexstaterenames = {} # Dictionary mapping lexer states to symbol names - self.lexstate = 'INITIAL' # Current lexer state - self.lexstatestack = [] # Stack of lexer states - self.lexstateinfo = None # State information - self.lexstateignore = {} # Dictionary of ignored characters for each state - self.lexstateerrorf = {} # Dictionary of error functions for each state - self.lexstateeoff = {} # Dictionary of eof functions for each state - self.lexreflags = 0 # Optional re compile flags - self.lexdata = None # Actual input data (as a string) - self.lexpos = 0 # Current position in input text - self.lexlen = 0 # Length of the input text - self.lexerrorf = None # Error rule (if any) - self.lexeoff = None # EOF rule (if any) - self.lextokens = None # List of valid tokens - self.lexignore = '' # Ignored characters - self.lexliterals = '' # Literal characters that can be passed through - self.lexmodule = None # Module - self.lineno = 1 # Current line number - self.lexoptimize = False # Optimized mode - - def clone(self, object=None): - c = copy.copy(self) - - # If the object parameter has been supplied, it means we are attaching the - # lexer to a new object. In this case, we have to rebind all methods in - # the lexstatere and lexstateerrorf tables. - - if object: - newtab = {} - for key, ritem in self.lexstatere.items(): - newre = [] - for cre, findex in ritem: - newfindex = [] - for f in findex: - if not f or not f[0]: - newfindex.append(f) - continue - newfindex.append((getattr(object, f[0].__name__), f[1])) - newre.append((cre, newfindex)) - newtab[key] = newre - c.lexstatere = newtab - c.lexstateerrorf = {} - for key, ef in self.lexstateerrorf.items(): - c.lexstateerrorf[key] = getattr(object, ef.__name__) - c.lexmodule = object - return c - - # ------------------------------------------------------------ - # writetab() - Write lexer information to a table file - # ------------------------------------------------------------ - def writetab(self, lextab, outputdir=''): - if isinstance(lextab, types.ModuleType): - raise IOError("Won't overwrite existing lextab module") - basetabmodule = lextab.split('.')[-1] - filename = os.path.join(outputdir, basetabmodule) + '.py' - with open(filename, 'w') as tf: - tf.write('# %s.py. This file automatically created by PLY (version %s). 
Don\'t edit!\n' % (basetabmodule, __version__)) - tf.write('_tabversion = %s\n' % repr(__tabversion__)) - tf.write('_lextokens = set(%s)\n' % repr(tuple(sorted(self.lextokens)))) - tf.write('_lexreflags = %s\n' % repr(int(self.lexreflags))) - tf.write('_lexliterals = %s\n' % repr(self.lexliterals)) - tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo)) - - # Rewrite the lexstatere table, replacing function objects with function names - tabre = {} - for statename, lre in self.lexstatere.items(): - titem = [] - for (pat, func), retext, renames in zip(lre, self.lexstateretext[statename], self.lexstaterenames[statename]): - titem.append((retext, _funcs_to_names(func, renames))) - tabre[statename] = titem - - tf.write('_lexstatere = %s\n' % repr(tabre)) - tf.write('_lexstateignore = %s\n' % repr(self.lexstateignore)) - - taberr = {} - for statename, ef in self.lexstateerrorf.items(): - taberr[statename] = ef.__name__ if ef else None - tf.write('_lexstateerrorf = %s\n' % repr(taberr)) - - tabeof = {} - for statename, ef in self.lexstateeoff.items(): - tabeof[statename] = ef.__name__ if ef else None - tf.write('_lexstateeoff = %s\n' % repr(tabeof)) - - # ------------------------------------------------------------ - # readtab() - Read lexer information from a tab file - # ------------------------------------------------------------ - def readtab(self, tabfile, fdict): - if isinstance(tabfile, types.ModuleType): - lextab = tabfile - else: - exec('import %s' % tabfile) - lextab = sys.modules[tabfile] - - if getattr(lextab, '_tabversion', '0.0') != __tabversion__: - raise ImportError('Inconsistent PLY version') - - self.lextokens = lextab._lextokens - self.lexreflags = lextab._lexreflags - self.lexliterals = lextab._lexliterals - self.lextokens_all = self.lextokens | set(self.lexliterals) - self.lexstateinfo = lextab._lexstateinfo - self.lexstateignore = lextab._lexstateignore - self.lexstatere = {} - self.lexstateretext = {} - for statename, lre in lextab._lexstatere.items(): - titem = [] - txtitem = [] - for pat, func_name in lre: - titem.append((re.compile(pat, lextab._lexreflags), _names_to_funcs(func_name, fdict))) - txtitem.append(pat) # keep the regex text populated; it was silently left empty here - - self.lexstatere[statename] = titem - self.lexstateretext[statename] = txtitem - - self.lexstateerrorf = {} - for statename, ef in lextab._lexstateerrorf.items(): - self.lexstateerrorf[statename] = fdict[ef] - - self.lexstateeoff = {} - for statename, ef in lextab._lexstateeoff.items(): - self.lexstateeoff[statename] = fdict[ef] - - self.begin('INITIAL') - - # ------------------------------------------------------------ - # input() - Push a new string into the lexer - # ------------------------------------------------------------ - def input(self, s): - # Pull off the first character to see if s looks like a string - c = s[:1] - if not isinstance(c, StringTypes): - raise ValueError('Expected a string') - self.lexdata = s - self.lexpos = 0 - self.lexlen = len(s) - - # ------------------------------------------------------------ - # begin() - Changes the lexing state - # ------------------------------------------------------------ - def begin(self, state): - if state not in self.lexstatere: - raise ValueError('Undefined state') - self.lexre = self.lexstatere[state] - self.lexretext = self.lexstateretext[state] - self.lexignore = self.lexstateignore.get(state, '') - self.lexerrorf = self.lexstateerrorf.get(state, None) - self.lexeoff = self.lexstateeoff.get(state, None) - self.lexstate = state - - # ------------------------------------------------------------ - # push_state() -
Changes the lexing state and saves old on stack - # ------------------------------------------------------------ - def push_state(self, state): - self.lexstatestack.append(self.lexstate) - self.begin(state) - - # ------------------------------------------------------------ - # pop_state() - Restores the previous state - # ------------------------------------------------------------ - def pop_state(self): - self.begin(self.lexstatestack.pop()) - - # ------------------------------------------------------------ - # current_state() - Returns the current lexing state - # ------------------------------------------------------------ - def current_state(self): - return self.lexstate - - # ------------------------------------------------------------ - # skip() - Skip ahead n characters - # ------------------------------------------------------------ - def skip(self, n): - self.lexpos += n - - # ------------------------------------------------------------ - # opttoken() - Return the next token from the Lexer - # - # Note: This function has been carefully implemented to be as fast - # as possible. Don't make changes unless you really know what - # you are doing - # ------------------------------------------------------------ - def token(self): - # Make local copies of frequently referenced attributes - lexpos = self.lexpos - lexlen = self.lexlen - lexignore = self.lexignore - lexdata = self.lexdata - - while lexpos < lexlen: - # This code provides some short-circuit code for whitespace, tabs, and other ignored characters - if lexdata[lexpos] in lexignore: - lexpos += 1 - continue - - # Look for a regular expression match - for lexre, lexindexfunc in self.lexre: - m = lexre.match(lexdata, lexpos) - if not m: - continue - - # Create a token for return - tok = LexToken() - tok.value = m.group() - tok.lineno = self.lineno - tok.lexpos = lexpos - - i = m.lastindex - func, tok.type = lexindexfunc[i] - - if not func: - # If no token type was set, it's an ignored token - if tok.type: - self.lexpos = m.end() - return tok - else: - lexpos = m.end() - break - - lexpos = m.end() - - # If token is processed by a function, call it - - tok.lexer = self # Set additional attributes useful in token rules - self.lexmatch = m - self.lexpos = lexpos - - newtok = func(tok) - - # Every function must return a token, if nothing, we just move to next token - if not newtok: - lexpos = self.lexpos # This is here in case user has updated lexpos. - lexignore = self.lexignore # This is here in case there was a state change - break - - # Verify type of the token. If not in the token map, raise an error - if not self.lexoptimize: - if newtok.type not in self.lextokens_all: - raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % ( - func.__code__.co_filename, func.__code__.co_firstlineno, - func.__name__, newtok.type), lexdata[lexpos:]) - - return newtok - else: - # No match, see if in literals - if lexdata[lexpos] in self.lexliterals: - tok = LexToken() - tok.value = lexdata[lexpos] - tok.lineno = self.lineno - tok.type = tok.value - tok.lexpos = lexpos - self.lexpos = lexpos + 1 - return tok - - # No match. Call t_error() if defined. - if self.lexerrorf: - tok = LexToken() - tok.value = self.lexdata[lexpos:] - tok.lineno = self.lineno - tok.type = 'error' - tok.lexer = self - tok.lexpos = lexpos - self.lexpos = lexpos - newtok = self.lexerrorf(tok) - if lexpos == self.lexpos: - # Error method didn't change text position at all. This is an error. - raise LexError("Scanning error. 
Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:]) - lexpos = self.lexpos - if not newtok: - continue - return newtok - - self.lexpos = lexpos - raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:]) - - if self.lexeoff: - tok = LexToken() - tok.type = 'eof' - tok.value = '' - tok.lineno = self.lineno - tok.lexpos = lexpos - tok.lexer = self - self.lexpos = lexpos - newtok = self.lexeoff(tok) - return newtok - - self.lexpos = lexpos + 1 - if self.lexdata is None: - raise RuntimeError('No input string given with input()') - return None - - # Iterator interface - def __iter__(self): - return self - - def next(self): - t = self.token() - if t is None: - raise StopIteration - return t - - __next__ = next - -# ----------------------------------------------------------------------------- -# ==== Lex Builder === -# -# The functions and classes below are used to collect lexing information -# and build a Lexer object from it. -# ----------------------------------------------------------------------------- - -# ----------------------------------------------------------------------------- -# _get_regex(func) -# -# Returns the regular expression assigned to a function either as a doc string -# or as a .regex attribute attached by the @TOKEN decorator. -# ----------------------------------------------------------------------------- -def _get_regex(func): - return getattr(func, 'regex', func.__doc__) - -# ----------------------------------------------------------------------------- -# get_caller_module_dict() -# -# This function returns a dictionary containing all of the symbols defined within -# a caller further down the call stack. This is used to get the environment -# associated with the yacc() call if none was provided. -# ----------------------------------------------------------------------------- -def get_caller_module_dict(levels): - f = sys._getframe(levels) - ldict = f.f_globals.copy() - if f.f_globals != f.f_locals: - ldict.update(f.f_locals) - return ldict - -# ----------------------------------------------------------------------------- -# _funcs_to_names() -# -# Given a list of regular expression functions, this converts it to a list -# suitable for output to a table file -# ----------------------------------------------------------------------------- -def _funcs_to_names(funclist, namelist): - result = [] - for f, name in zip(funclist, namelist): - if f and f[0]: - result.append((name, f[1])) - else: - result.append(f) - return result - -# ----------------------------------------------------------------------------- -# _names_to_funcs() -# -# Given a list of regular expression function names, this converts it back to -# functions. -# ----------------------------------------------------------------------------- -def _names_to_funcs(namelist, fdict): - result = [] - for n in namelist: - if n and n[0]: - result.append((fdict[n[0]], n[1])) - else: - result.append(n) - return result - -# ----------------------------------------------------------------------------- -# _form_master_re() -# -# This function takes a list of all of the regex components and attempts to -# form the master regular expression. Given limitations in the Python re -# module, it may be necessary to break the master regex into separate expressions. 
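The named-group dispatch that _form_master_re() builds can be illustrated in isolation: every rule is OR-ed into one pattern of named groups, and m.lastindex identifies which rule fired (the recursive split above exists because older Python re versions capped a pattern at 100 named groups). A self-contained sketch using only the standard library, with invented rule names:

    import re

    # Two 'rules' combined into one master pattern of named groups.
    master = re.compile('|'.join([
        r'(?P<NUMBER>\d+)',
        r'(?P<ID>[A-Za-z_][A-Za-z0-9_]*)',
    ]))

    # Invert groupindex to map a matched group number back to its rule,
    # much as lexindexfunc maps group numbers to (function, type) pairs.
    index_to_name = {i: name for name, i in master.groupindex.items()}

    for m in master.finditer('x1 42 foo'):
        print(index_to_name[m.lastindex], m.group())
    # ID x1 / NUMBER 42 / ID foo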
-# ----------------------------------------------------------------------------- -def _form_master_re(relist, reflags, ldict, toknames): - if not relist: - return [] - regex = '|'.join(relist) - try: - lexre = re.compile(regex, reflags) - - # Build the index to function map for the matching engine - lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1) - lexindexnames = lexindexfunc[:] - - for f, i in lexre.groupindex.items(): - handle = ldict.get(f, None) - if type(handle) in (types.FunctionType, types.MethodType): - lexindexfunc[i] = (handle, toknames[f]) - lexindexnames[i] = f - elif handle is not None: - lexindexnames[i] = f - if f.find('ignore_') > 0: - lexindexfunc[i] = (None, None) - else: - lexindexfunc[i] = (None, toknames[f]) - - return [(lexre, lexindexfunc)], [regex], [lexindexnames] - except Exception: - m = int(len(relist)/2) - if m == 0: - m = 1 - llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames) - rlist, rre, rnames = _form_master_re(relist[m:], reflags, ldict, toknames) - return (llist+rlist), (lre+rre), (lnames+rnames) - -# ----------------------------------------------------------------------------- -# def _statetoken(s,names) -# -# Given a declaration name s of the form "t_" and a dictionary whose keys are -# state names, this function returns a tuple (states,tokenname) where states -# is a tuple of state names and tokenname is the name of the token. For example, -# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM') -# ----------------------------------------------------------------------------- -def _statetoken(s, names): - parts = s.split('_') - for i, part in enumerate(parts[1:], 1): - if part not in names and part != 'ANY': - break - - if i > 1: - states = tuple(parts[1:i]) - else: - states = ('INITIAL',) - - if 'ANY' in states: - states = tuple(names) - - tokenname = '_'.join(parts[i:]) - return (states, tokenname) - - -# ----------------------------------------------------------------------------- -# LexerReflect() -# -# This class represents information needed to build a lexer as extracted from a -# user's input file. 
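The t_<state>_<TOKEN> naming that _statetoken() decodes is how rules get bound to lexer states in practice. A small hedged example of the convention (a hypothetical two-state lexer, not code from this diff):

    import ply.lex as lex

    tokens = ('WORD', 'STRING')

    # One extra exclusive state; rules named t_str_* apply only inside it,
    # exactly as _statetoken() decodes them.
    states = (('str', 'exclusive'),)

    t_ignore = ' \t'

    def t_WORD(t):
        r'[A-Za-z]+'
        return t

    def t_quote(t):              # INITIAL state: a '"' enters the str state
        r'"'
        t.lexer.push_state('str')

    def t_str_STRING(t):         # decoded as states=('str',), token STRING
        r'[^"]+'
        return t

    def t_str_quote(t):          # a closing '"' leaves the str state
        r'"'
        t.lexer.pop_state()

    t_str_ignore = ''

    def t_error(t):
        t.lexer.skip(1)

    def t_str_error(t):
        t.lexer.skip(1)

    lexer = lex.lex()
    lexer.input('abc "hello there" xyz')
    for tok in lexer:
        print(tok.type, tok.value)
    # WORD abc / STRING hello there / WORD xyz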
-# ----------------------------------------------------------------------------- -class LexerReflect(object): - def __init__(self, ldict, log=None, reflags=0): - self.ldict = ldict - self.error_func = None - self.tokens = [] - self.reflags = reflags - self.stateinfo = {'INITIAL': 'inclusive'} - self.modules = set() - self.error = False - self.log = PlyLogger(sys.stderr) if log is None else log - - # Get all of the basic information - def get_all(self): - self.get_tokens() - self.get_literals() - self.get_states() - self.get_rules() - - # Validate all of the information - def validate_all(self): - self.validate_tokens() - self.validate_literals() - self.validate_rules() - return self.error - - # Get the tokens map - def get_tokens(self): - tokens = self.ldict.get('tokens', None) - if not tokens: - self.log.error('No token list is defined') - self.error = True - return - - if not isinstance(tokens, (list, tuple)): - self.log.error('tokens must be a list or tuple') - self.error = True - return - - if not tokens: - self.log.error('tokens is empty') - self.error = True - return - - self.tokens = tokens - - # Validate the tokens - def validate_tokens(self): - terminals = {} - for n in self.tokens: - if not _is_identifier.match(n): - self.log.error("Bad token name '%s'", n) - self.error = True - if n in terminals: - self.log.warning("Token '%s' multiply defined", n) - terminals[n] = 1 - - # Get the literals specifier - def get_literals(self): - self.literals = self.ldict.get('literals', '') - if not self.literals: - self.literals = '' - - # Validate literals - def validate_literals(self): - try: - for c in self.literals: - if not isinstance(c, StringTypes) or len(c) > 1: - self.log.error('Invalid literal %s. Must be a single character', repr(c)) - self.error = True - - except TypeError: - self.log.error('Invalid literals specification. literals must be a sequence of characters') - self.error = True - - def get_states(self): - self.states = self.ldict.get('states', None) - # Build statemap - if self.states: - if not isinstance(self.states, (tuple, list)): - self.log.error('states must be defined as a tuple or list') - self.error = True - else: - for s in self.states: - if not isinstance(s, tuple) or len(s) != 2: - self.log.error("Invalid state specifier %s. 
Must be a tuple (statename,'exclusive|inclusive')", repr(s)) - self.error = True - continue - name, statetype = s - if not isinstance(name, StringTypes): - self.log.error('State name %s must be a string', repr(name)) - self.error = True - continue - if not (statetype == 'inclusive' or statetype == 'exclusive'): - self.log.error("State type for state %s must be 'inclusive' or 'exclusive'", name) - self.error = True - continue - if name in self.stateinfo: - self.log.error("State '%s' already defined", name) - self.error = True - continue - self.stateinfo[name] = statetype - - # Get all of the symbols with a t_ prefix and sort them into various - # categories (functions, strings, error functions, and ignore characters) - - def get_rules(self): - tsymbols = [f for f in self.ldict if f[:2] == 't_'] - - # Now build up a list of functions and a list of strings - self.toknames = {} # Mapping of symbols to token names - self.funcsym = {} # Symbols defined as functions - self.strsym = {} # Symbols defined as strings - self.ignore = {} # Ignore strings by state - self.errorf = {} # Error functions by state - self.eoff = {} # EOF functions by state - - for s in self.stateinfo: - self.funcsym[s] = [] - self.strsym[s] = [] - - if len(tsymbols) == 0: - self.log.error('No rules of the form t_rulename are defined') - self.error = True - return - - for f in tsymbols: - t = self.ldict[f] - states, tokname = _statetoken(f, self.stateinfo) - self.toknames[f] = tokname - - if hasattr(t, '__call__'): - if tokname == 'error': - for s in states: - self.errorf[s] = t - elif tokname == 'eof': - for s in states: - self.eoff[s] = t - elif tokname == 'ignore': - line = t.__code__.co_firstlineno - file = t.__code__.co_filename - self.log.error("%s:%d: Rule '%s' must be defined as a string", file, line, t.__name__) - self.error = True - else: - for s in states: - self.funcsym[s].append((f, t)) - elif isinstance(t, StringTypes): - if tokname == 'ignore': - for s in states: - self.ignore[s] = t - if '\\' in t: - self.log.warning("%s contains a literal backslash '\\'", f) - - elif tokname == 'error': - self.log.error("Rule '%s' must be defined as a function", f) - self.error = True - else: - for s in states: - self.strsym[s].append((f, t)) - else: - self.log.error('%s not defined as a function or string', f) - self.error = True - - # Sort the functions by line number - for f in self.funcsym.values(): - f.sort(key=lambda x: x[1].__code__.co_firstlineno) - - # Sort the strings by regular expression length - for s in self.strsym.values(): - s.sort(key=lambda x: len(x[1]), reverse=True) - - # Validate all of the t_rules collected - def validate_rules(self): - for state in self.stateinfo: - # Validate all rules defined by functions - - for fname, f in self.funcsym[state]: - line = f.__code__.co_firstlineno - file = f.__code__.co_filename - module = inspect.getmodule(f) - self.modules.add(module) - - tokname = self.toknames[fname] - if isinstance(f, types.MethodType): - reqargs = 2 - else: - reqargs = 1 - nargs = f.__code__.co_argcount - if nargs > reqargs: - self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__) - self.error = True - continue - - if nargs < reqargs: - self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__) - self.error = True - continue - - if not _get_regex(f): - self.log.error("%s:%d: No regular expression defined for rule '%s'", file, line, f.__name__) - self.error = True - continue - - try: - c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), 
self.reflags) - if c.match(''): - self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file, line, f.__name__) - self.error = True - except re.error as e: - self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file, line, f.__name__, e) - if '#' in _get_regex(f): - self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'", file, line, f.__name__) - self.error = True - - # Validate all rules defined by strings - for name, r in self.strsym[state]: - tokname = self.toknames[name] - if tokname == 'error': - self.log.error("Rule '%s' must be defined as a function", name) - self.error = True - continue - - if tokname not in self.tokens and tokname.find('ignore_') < 0: - self.log.error("Rule '%s' defined for an unspecified token %s", name, tokname) - self.error = True - continue - - try: - c = re.compile('(?P<%s>%s)' % (name, r), self.reflags) - if (c.match('')): - self.log.error("Regular expression for rule '%s' matches empty string", name) - self.error = True - except re.error as e: - self.log.error("Invalid regular expression for rule '%s'. %s", name, e) - if '#' in r: - self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'", name) - self.error = True - - if not self.funcsym[state] and not self.strsym[state]: - self.log.error("No rules defined for state '%s'", state) - self.error = True - - # Validate the error function - efunc = self.errorf.get(state, None) - if efunc: - f = efunc - line = f.__code__.co_firstlineno - file = f.__code__.co_filename - module = inspect.getmodule(f) - self.modules.add(module) - - if isinstance(f, types.MethodType): - reqargs = 2 - else: - reqargs = 1 - nargs = f.__code__.co_argcount - if nargs > reqargs: - self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__) - self.error = True - - if nargs < reqargs: - self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__) - self.error = True - - for module in self.modules: - self.validate_module(module) - - # ----------------------------------------------------------------------------- - # validate_module() - # - # This checks to see if there are duplicated t_rulename() functions or strings - # in the parser input file. This is done using a simple regular expression - # match on each line in the source code of the given module. - # ----------------------------------------------------------------------------- - - def validate_module(self, module): - try: - lines, linen = inspect.getsourcelines(module) - except IOError: - return - - fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(') - sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=') - - counthash = {} - linen += 1 - for line in lines: - m = fre.match(line) - if not m: - m = sre.match(line) - if m: - name = m.group(1) - prev = counthash.get(name) - if not prev: - counthash[name] = linen - else: - filename = inspect.getsourcefile(module) - self.log.error('%s:%d: Rule %s redefined. 
Previously defined on line %d', filename, linen, name, prev) - self.error = True - linen += 1 - -# ----------------------------------------------------------------------------- -# lex(module) -# -# Build all of the regular expression rules from definitions in the supplied module -# ----------------------------------------------------------------------------- -def lex(module=None, object=None, debug=False, optimize=False, lextab='lextab', - reflags=int(re.VERBOSE), nowarn=False, outputdir=None, debuglog=None, errorlog=None): - - if lextab is None: - lextab = 'lextab' - - global lexer - - ldict = None - stateinfo = {'INITIAL': 'inclusive'} - lexobj = Lexer() - lexobj.lexoptimize = optimize - global token, input - - if errorlog is None: - errorlog = PlyLogger(sys.stderr) - - if debug: - if debuglog is None: - debuglog = PlyLogger(sys.stderr) - - # Get the module dictionary used for the lexer - if object: - module = object - - # Get the module dictionary used for the parser - if module: - _items = [(k, getattr(module, k)) for k in dir(module)] - ldict = dict(_items) - # If no __file__ attribute is available, try to obtain it from the __module__ instead - if '__file__' not in ldict: - ldict['__file__'] = sys.modules[ldict['__module__']].__file__ - else: - ldict = get_caller_module_dict(2) - - # Determine if the module is part of a package or not. - # If so, fix the tabmodule setting so that tables load correctly - pkg = ldict.get('__package__') - if pkg and isinstance(lextab, str): - if '.' not in lextab: - lextab = pkg + '.' + lextab - - # Collect lexer information from the dictionary - linfo = LexerReflect(ldict, log=errorlog, reflags=reflags) - linfo.get_all() - if not optimize: - if linfo.validate_all(): - raise SyntaxError("Can't build lexer") - - if optimize and lextab: - try: - lexobj.readtab(lextab, ldict) - token = lexobj.token - input = lexobj.input - lexer = lexobj - return lexobj - - except ImportError: - pass - - # Dump some basic debugging information - if debug: - debuglog.info('lex: tokens = %r', linfo.tokens) - debuglog.info('lex: literals = %r', linfo.literals) - debuglog.info('lex: states = %r', linfo.stateinfo) - - # Build a dictionary of valid token names - lexobj.lextokens = set() - for n in linfo.tokens: - lexobj.lextokens.add(n) - - # Get literals specification - if isinstance(linfo.literals, (list, tuple)): - lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals) - else: - lexobj.lexliterals = linfo.literals - - lexobj.lextokens_all = lexobj.lextokens | set(lexobj.lexliterals) - - # Get the stateinfo dictionary - stateinfo = linfo.stateinfo - - regexs = {} - # Build the master regular expressions - for state in stateinfo: - regex_list = [] - - # Add rules defined by functions first - for fname, f in linfo.funcsym[state]: - regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f))) - if debug: - debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state) - - # Now add all of the simple rules - for name, r in linfo.strsym[state]: - regex_list.append('(?P<%s>%s)' % (name, r)) - if debug: - debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state) - - regexs[state] = regex_list - - # Build the master regular expressions - - if debug: - debuglog.info('lex: ==== MASTER REGEXS FOLLOW ====') - - for state in regexs: - lexre, re_text, re_names = _form_master_re(regexs[state], reflags, ldict, linfo.toknames) - lexobj.lexstatere[state] = lexre - lexobj.lexstateretext[state] = re_text - lexobj.lexstaterenames[state] =
re_names - if debug: - for i, text in enumerate(re_text): - debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text) - - # For inclusive states, we need to add the regular expressions from the INITIAL state - for state, stype in stateinfo.items(): - if state != 'INITIAL' and stype == 'inclusive': - lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL']) - lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL']) - lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL']) - - lexobj.lexstateinfo = stateinfo - lexobj.lexre = lexobj.lexstatere['INITIAL'] - lexobj.lexretext = lexobj.lexstateretext['INITIAL'] - lexobj.lexreflags = reflags - - # Set up ignore variables - lexobj.lexstateignore = linfo.ignore - lexobj.lexignore = lexobj.lexstateignore.get('INITIAL', '') - - # Set up error functions - lexobj.lexstateerrorf = linfo.errorf - lexobj.lexerrorf = linfo.errorf.get('INITIAL', None) - if not lexobj.lexerrorf: - errorlog.warning('No t_error rule is defined') - - # Set up eof functions - lexobj.lexstateeoff = linfo.eoff - lexobj.lexeoff = linfo.eoff.get('INITIAL', None) - - # Check state information for ignore and error rules - for s, stype in stateinfo.items(): - if stype == 'exclusive': - if s not in linfo.errorf: - errorlog.warning("No error rule is defined for exclusive state '%s'", s) - if s not in linfo.ignore and lexobj.lexignore: - errorlog.warning("No ignore rule is defined for exclusive state '%s'", s) - elif stype == 'inclusive': - if s not in linfo.errorf: - linfo.errorf[s] = linfo.errorf.get('INITIAL', None) - if s not in linfo.ignore: - linfo.ignore[s] = linfo.ignore.get('INITIAL', '') - - # Create global versions of the token() and input() functions - token = lexobj.token - input = lexobj.input - lexer = lexobj - - # If in optimize mode, we write the lextab - if lextab and optimize: - if outputdir is None: - # If no output directory is set, the location of the output files - # is determined according to the following rules: - # - If lextab specifies a package, files go into that package directory - # - Otherwise, files go in the same directory as the specifying module - if isinstance(lextab, types.ModuleType): - srcfile = lextab.__file__ - else: - if '.' not in lextab: - srcfile = ldict['__file__'] - else: - parts = lextab.split('.') - pkgname = '.'.join(parts[:-1]) - exec('import %s' % pkgname) - srcfile = getattr(sys.modules[pkgname], '__file__', '') - outputdir = os.path.dirname(srcfile) - try: - lexobj.writetab(lextab, outputdir) - if lextab in sys.modules: - del sys.modules[lextab] - except IOError as e: - errorlog.warning("Couldn't write lextab module %r. 
%s" % (lextab, e)) - - return lexobj - -# ----------------------------------------------------------------------------- -# runmain() -# -# This runs the lexer as a main program -# ----------------------------------------------------------------------------- - -def runmain(lexer=None, data=None): - if not data: - try: - filename = sys.argv[1] - with open(filename) as f: - data = f.read() - except IndexError: - sys.stdout.write('Reading from standard input (type EOF to end):\n') - data = sys.stdin.read() - - if lexer: - _input = lexer.input - else: - _input = input - _input(data) - if lexer: - _token = lexer.token - else: - _token = token - - while True: - tok = _token() - if not tok: - break - sys.stdout.write('(%s,%r,%d,%d)\n' % (tok.type, tok.value, tok.lineno, tok.lexpos)) - -# ----------------------------------------------------------------------------- -# @TOKEN(regex) -# -# This decorator function can be used to set the regex expression on a function -# when its docstring might need to be set in an alternative way -# ----------------------------------------------------------------------------- - -def TOKEN(r): - def set_regex(f): - if hasattr(r, '__call__'): - f.regex = _get_regex(r) - else: - f.regex = r - return f - return set_regex - -# Alternative spelling of the TOKEN decorator -Token = TOKEN diff --git a/xonsh/ply/ply/yacc.py b/xonsh/ply/ply/yacc.py deleted file mode 100644 index 108c43d..0000000 --- a/xonsh/ply/ply/yacc.py +++ /dev/null @@ -1,3504 +0,0 @@ -# ----------------------------------------------------------------------------- -# ply: yacc.py -# -# Copyright (C) 2001-2019 -# David M. Beazley (Dabeaz LLC) -# All rights reserved. -# -# Latest version: https://github.com/dabeaz/ply -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are -# met: -# -# * Redistributions of source code must retain the above copyright notice, -# this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright notice, -# this list of conditions and the following disclaimer in the documentation -# and/or other materials provided with the distribution. -# * Neither the name of David Beazley or Dabeaz LLC may be used to -# endorse or promote products derived from this software without -# specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# ----------------------------------------------------------------------------- -# -# This implements an LR parser that is constructed from grammar rules defined -# as Python functions. The grammar is specified by supplying the BNF inside -# Python documentation strings. 
The inspiration for this technique was borrowed -# from John Aycock's Spark parsing system. PLY might be viewed as a cross between -# Spark and the GNU bison utility. -# -# The current implementation is only somewhat object-oriented. The -# LR parser itself is defined in terms of an object (which allows multiple -# parsers to co-exist). However, most of the variables used during table -# construction are defined in terms of global variables. Users shouldn't -# notice unless they are trying to define multiple parsers at the same -# time using threads (in which case they should have their head examined). -# -# This implementation supports both SLR and LALR(1) parsing. LALR(1) -# support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu), -# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles, -# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced -# by the more efficient DeRemer and Pennello algorithm. -# -# :::::::: WARNING ::::::: -# -# Construction of LR parsing tables is fairly complicated and expensive. -# To make this module run fast, a *LOT* of work has been put into -# optimization---often at the expense of readability and what one might -# consider to be good Python "coding style." Modify the code at your -# own risk! -# ---------------------------------------------------------------------------- - -import re -import types -import sys -import os.path -import inspect -import warnings - -__version__ = '3.11' -__tabversion__ = '3.10' - -#----------------------------------------------------------------------------- -# === User configurable parameters === -# -# Change these to modify the default behavior of yacc (if you wish) -#----------------------------------------------------------------------------- - -yaccdebug = True # Debugging mode. If set, yacc generates - # a 'parser.out' file in the current directory - -debug_file = 'parser.out' # Default name of the debugging file -tab_module = 'parsetab' # Default name of the table module -default_lr = 'LALR' # Default LR table generation method - -error_count = 3 # Number of symbols that must be shifted to leave recovery mode - -yaccdevel = False # Set to True if developing yacc. This turns off optimized - # implementations of certain functions. - -resultlimit = 40 # Size limit of results when running in debug mode. - -pickle_protocol = 0 # Protocol to use when writing pickle files - -# String type-checking compatibility -if sys.version_info[0] < 3: - string_types = basestring -else: - string_types = str - -MAXINT = sys.maxsize - -# This object is a stand-in for a logging object created by the -# logging module. PLY will use this by default to create things -# such as the parser.out file. If a user wants more detailed -# information, they can create their own logging object and pass -# it into PLY. - -class PlyLogger(object): - def __init__(self, f): - self.f = f - - def debug(self, msg, *args, **kwargs): - self.f.write((msg % args) + '\n') - - info = debug - - def warning(self, msg, *args, **kwargs): - self.f.write('WARNING: ' + (msg % args) + '\n') - - def error(self, msg, *args, **kwargs): - self.f.write('ERROR: ' + (msg % args) + '\n') - - critical = debug - -# Null logger is used when no output is generated. Does nothing.
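As a concrete reference for the docstring-BNF convention described in the header above, here is a minimal, self-contained grammar (a hypothetical adder, not code from this diff; write_tables=False keeps it from emitting a parsetab module):

    import ply.lex as lex
    import ply.yacc as yacc

    tokens = ('NUMBER', 'PLUS')
    t_PLUS = r'\+'
    t_ignore = ' '

    def t_NUMBER(t):
        r'\d+'
        t.value = int(t.value)
        return t

    def t_error(t):
        t.lexer.skip(1)

    # The BNF lives in the docstrings, as the header describes.
    def p_expr_plus(p):
        'expr : expr PLUS NUMBER'
        p[0] = p[1] + p[3]

    def p_expr_number(p):
        'expr : NUMBER'
        p[0] = p[1]

    def p_error(p):
        print('Syntax error at %r' % (p,))

    lexer = lex.lex()
    parser = yacc.yacc(write_tables=False, debug=False)
    print(parser.parse('1 + 2 + 3'))   # prints 6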
-class NullLogger(object): - def __getattribute__(self, name): - return self - - def __call__(self, *args, **kwargs): - return self - -# Exception raised for yacc-related errors -class YaccError(Exception): - pass - -# Format the result message that the parser produces when running in debug mode. -def format_result(r): - repr_str = repr(r) - if '\n' in repr_str: - repr_str = repr(repr_str) - if len(repr_str) > resultlimit: - repr_str = repr_str[:resultlimit] + ' ...' - result = '<%s @ 0x%x> (%s)' % (type(r).__name__, id(r), repr_str) - return result - -# Format stack entries when the parser is running in debug mode -def format_stack_entry(r): - repr_str = repr(r) - if '\n' in repr_str: - repr_str = repr(repr_str) - if len(repr_str) < 16: - return repr_str - else: - return '<%s @ 0x%x>' % (type(r).__name__, id(r)) - -# Panic mode error recovery support. This feature is being reworked--much of the -# code here is to offer a deprecation/backwards compatible transition - -_errok = None -_token = None -_restart = None -_warnmsg = '''PLY: Don't use global functions errok(), token(), and restart() in p_error(). -Instead, invoke the methods on the associated parser instance: - - def p_error(p): - ... - # Use parser.errok(), parser.token(), parser.restart() - ... - - parser = yacc.yacc() -''' - -def errok(): - warnings.warn(_warnmsg) - return _errok() - -def restart(): - warnings.warn(_warnmsg) - return _restart() - -def token(): - warnings.warn(_warnmsg) - return _token() - -# Utility function to call the p_error() function with some deprecation hacks -def call_errorfunc(errorfunc, token, parser): - global _errok, _token, _restart - _errok = parser.errok - _token = parser.token - _restart = parser.restart - r = errorfunc(token) - try: - del _errok, _token, _restart - except NameError: - pass - return r - -#----------------------------------------------------------------------------- -# === LR Parsing Engine === -# -# The following classes are used for the LR parser itself. These are not -# used during table construction and are independent of the actual LR -# table generation algorithm -#----------------------------------------------------------------------------- - -# This class is used to hold non-terminal grammar symbols during parsing. -# It normally has the following attributes set: -# .type = Grammar symbol type -# .value = Symbol value -# .lineno = Starting line number -# .endlineno = Ending line number (optional, set automatically) -# .lexpos = Starting lex position -# .endlexpos = Ending lex position (optional, set automatically) - -class YaccSymbol: - def __str__(self): - return self.type - - def __repr__(self): - return str(self) - -# This class is a wrapper around the objects actually passed to each -# grammar rule. Index lookup and assignment actually assign the -# .value attribute of the underlying YaccSymbol object. -# The lineno() method returns the line number of a given -# item (or 0 if not defined). The linespan() method returns -# a tuple of (startline,endline) representing the range of lines -# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos) -# representing the range of positional information for a symbol. 
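The indexing contract described above can be exercised directly, which makes the .value indirection concrete. A hedged sketch (it assumes this module is importable as ply.yacc; the fake symbols stand in for what the parsing engine would normally push):

    from ply.yacc import YaccSymbol, YaccProduction

    def make(sym_type, value, lineno):
        s = YaccSymbol()
        s.type, s.value, s.lineno, s.lexpos = sym_type, value, lineno, 0
        return s

    # A fake grammar slice for:  assign : NAME EQUALS NUMBER
    # (slot 0 is the left-hand side, exactly as the engine lays it out).
    slice_ = [make('assign', None, 0),
              make('NAME', 'answer', 1),
              make('EQUALS', '=', 1),
              make('NUMBER', 42, 1)]
    p = YaccProduction(slice_)

    p[0] = (p[1], p[3])     # index lookup/assignment go through .value
    print(p[0])             # ('answer', 42)
    print(p.lineno(1))      # 1
    print(p.linespan(3))    # (1, 1)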
- -class YaccProduction: - def __init__(self, s, stack=None): - self.slice = s - self.stack = stack - self.lexer = None - self.parser = None - - def __getitem__(self, n): - if isinstance(n, slice): - return [s.value for s in self.slice[n]] - elif n >= 0: - return self.slice[n].value - else: - return self.stack[n].value - - def __setitem__(self, n, v): - self.slice[n].value = v - - def __getslice__(self, i, j): - return [s.value for s in self.slice[i:j]] - - def __len__(self): - return len(self.slice) - - def lineno(self, n): - return getattr(self.slice[n], 'lineno', 0) - - def set_lineno(self, n, lineno): - self.slice[n].lineno = lineno - - def linespan(self, n): - startline = getattr(self.slice[n], 'lineno', 0) - endline = getattr(self.slice[n], 'endlineno', startline) - return startline, endline - - def lexpos(self, n): - return getattr(self.slice[n], 'lexpos', 0) - - def set_lexpos(self, n, lexpos): - self.slice[n].lexpos = lexpos - - def lexspan(self, n): - startpos = getattr(self.slice[n], 'lexpos', 0) - endpos = getattr(self.slice[n], 'endlexpos', startpos) - return startpos, endpos - - def error(self): - raise SyntaxError - -# ----------------------------------------------------------------------------- -# == LRParser == -# -# The LR Parsing engine. -# ----------------------------------------------------------------------------- - -class LRParser: - def __init__(self, lrtab, errorf): - self.productions = lrtab.lr_productions - self.action = lrtab.lr_action - self.goto = lrtab.lr_goto - self.errorfunc = errorf - self.set_defaulted_states() - self.errorok = True - - def errok(self): - self.errorok = True - - def restart(self): - del self.statestack[:] - del self.symstack[:] - sym = YaccSymbol() - sym.type = '$end' - self.symstack.append(sym) - self.statestack.append(0) - - # Defaulted state support. - # This method identifies parser states where there is only one possible reduction action. - # For such states, the parser can choose to make a rule reduction without consuming - # the next look-ahead token. This delayed invocation of the tokenizer can be useful in - # certain kinds of advanced parsing situations where the lexer and parser interact with - # each other or change states (i.e., manipulation of scope, lexer states, etc.). - # - # See: http://www.gnu.org/software/bison/manual/html_node/Default-Reductions.html#Default-Reductions - def set_defaulted_states(self): - self.defaulted_states = {} - for state, actions in self.action.items(): - rules = list(actions.values()) - if len(rules) == 1 and rules[0] < 0: - self.defaulted_states[state] = rules[0] - - def disable_defaulted_states(self): - self.defaulted_states = {} - - def parse(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None): - if debug or yaccdevel: - if isinstance(debug, int): - debug = PlyLogger(sys.stderr) - return self.parsedebug(input, lexer, debug, tracking, tokenfunc) - elif tracking: - return self.parseopt(input, lexer, debug, tracking, tokenfunc) - else: - return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc) - - - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - # parsedebug(). - # - # This is the debugging enabled version of parse(). All changes made to the - # parsing engine should be made here. Optimized versions of this function - # are automatically created by the ply/ygen.py script. This script cuts out - # sections enclosed in markers such as this: - # - # #--! DEBUG - # statements - # #--!
DEBUG - # - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - def parsedebug(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None): - #--! parsedebug-start - lookahead = None # Current lookahead symbol - lookaheadstack = [] # Stack of lookahead symbols - actions = self.action # Local reference to action table (to avoid lookup on self.) - goto = self.goto # Local reference to goto table (to avoid lookup on self.) - prod = self.productions # Local reference to production list (to avoid lookup on self.) - defaulted_states = self.defaulted_states # Local reference to defaulted states - pslice = YaccProduction(None) # Production object passed to grammar rules - errorcount = 0 # Used during error recovery - - #--! DEBUG - debug.info('PLY: PARSE DEBUG START') - #--! DEBUG - - # If no lexer was given, we will try to use the lex module - if not lexer: - from . import lex - lexer = lex.lexer - - # Set up the lexer and parser objects on pslice - pslice.lexer = lexer - pslice.parser = self - - # If input was supplied, pass to lexer - if input is not None: - lexer.input(input) - - if tokenfunc is None: - # Tokenize function - get_token = lexer.token - else: - get_token = tokenfunc - - # Set the parser() token method (sometimes used in error recovery) - self.token = get_token - - # Set up the state and symbol stacks - - statestack = [] # Stack of parsing states - self.statestack = statestack - symstack = [] # Stack of grammar symbols - self.symstack = symstack - - pslice.stack = symstack # Put in the production - errtoken = None # Err token - - # The start state is assumed to be (0,$end) - - statestack.append(0) - sym = YaccSymbol() - sym.type = '$end' - symstack.append(sym) - state = 0 - while True: - # Get the next symbol on the input. If a lookahead symbol - # is already set, we just use that. Otherwise, we'll pull - # the next token off of the lookaheadstack or from the lexer - - #--! DEBUG - debug.debug('') - debug.debug('State : %s', state) - #--! DEBUG - - if state not in defaulted_states: - if not lookahead: - if not lookaheadstack: - lookahead = get_token() # Get the next token - else: - lookahead = lookaheadstack.pop() - if not lookahead: - lookahead = YaccSymbol() - lookahead.type = '$end' - - # Check the action table - ltype = lookahead.type - t = actions[state].get(ltype) - else: - t = defaulted_states[state] - #--! DEBUG - debug.debug('Defaulted state %s: Reduce using %d', state, -t) - #--! DEBUG - - #--! DEBUG - debug.debug('Stack : %s', - ('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip()) - #--! DEBUG - - if t is not None: - if t > 0: - # shift a symbol on the stack - statestack.append(t) - state = t - - #--! DEBUG - debug.debug('Action : Shift and goto state %s', t) - #--! DEBUG - - symstack.append(lookahead) - lookahead = None - - # Decrease error count on successful shift - if errorcount: - errorcount -= 1 - continue - - if t < 0: - # reduce a symbol on the stack, emit a production - p = prod[-t] - pname = p.name - plen = p.len - - # Get production function - sym = YaccSymbol() - sym.type = pname # Production name - sym.value = None - - #--! DEBUG - if plen: - debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, - '['+','.join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+']', - goto[statestack[-1-plen]][pname]) - else: - debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, [], - goto[statestack[-1]][pname]) - - #--! 
DEBUG - - if plen: - targ = symstack[-plen-1:] - targ[0] = sym - - #--! TRACKING - if tracking: - t1 = targ[1] - sym.lineno = t1.lineno - sym.lexpos = t1.lexpos - t1 = targ[-1] - sym.endlineno = getattr(t1, 'endlineno', t1.lineno) - sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos) - #--! TRACKING - - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - # The code enclosed in this section is duplicated - # below as a performance optimization. Make sure - # changes get made in both locations. - - pslice.slice = targ - - try: - # Call the grammar rule with our special slice object - del symstack[-plen:] - self.state = state - p.callable(pslice) - del statestack[-plen:] - #--! DEBUG - debug.info('Result : %s', format_result(pslice[0])) - #--! DEBUG - symstack.append(sym) - state = goto[statestack[-1]][pname] - statestack.append(state) - except SyntaxError: - # If an error was set. Enter error recovery state - lookaheadstack.append(lookahead) # Save the current lookahead token - symstack.extend(targ[1:-1]) # Put the production slice back on the stack - statestack.pop() # Pop back one state (before the reduce) - state = statestack[-1] - sym.type = 'error' - sym.value = 'error' - lookahead = sym - errorcount = error_count - self.errorok = False - - continue - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - else: - - #--! TRACKING - if tracking: - sym.lineno = lexer.lineno - sym.lexpos = lexer.lexpos - #--! TRACKING - - targ = [sym] - - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - # The code enclosed in this section is duplicated - # above as a performance optimization. Make sure - # changes get made in both locations. - - pslice.slice = targ - - try: - # Call the grammar rule with our special slice object - self.state = state - p.callable(pslice) - #--! DEBUG - debug.info('Result : %s', format_result(pslice[0])) - #--! DEBUG - symstack.append(sym) - state = goto[statestack[-1]][pname] - statestack.append(state) - except SyntaxError: - # If an error was set. Enter error recovery state - lookaheadstack.append(lookahead) # Save the current lookahead token - statestack.pop() # Pop back one state (before the reduce) - state = statestack[-1] - sym.type = 'error' - sym.value = 'error' - lookahead = sym - errorcount = error_count - self.errorok = False - - continue - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - if t == 0: - n = symstack[-1] - result = getattr(n, 'value', None) - #--! DEBUG - debug.info('Done : Returning %s', format_result(result)) - debug.info('PLY: PARSE DEBUG END') - #--! DEBUG - return result - - if t is None: - - #--! DEBUG - debug.error('Error : %s', - ('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip()) - #--! DEBUG - - # We have some kind of parsing error here. To handle - # this, we are going to push the current token onto - # the tokenstack and replace it with an 'error' token. - # If there are any synchronization rules, they may - # catch it. - # - # In addition to pushing the error token, we call call - # the user defined p_error() function if this is the - # first syntax error. This function is only called if - # errorcount == 0. - if errorcount == 0 or self.errorok: - errorcount = error_count - self.errorok = False - errtoken = lookahead - if errtoken.type == '$end': - errtoken = None # End of file! 
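            # For orientation: the branch below hands errtoken to the user's
            # p_error() via call_errorfunc().  A typical panic-mode handler,
            # using the per-instance methods recommended by the deprecation
            # warning earlier in this file, might look roughly like this
            # (the SEMI token name is illustrative only):
            #
            #     def p_error(p):
            #         while True:
            #             tok = parser.token()        # drain tokens ...
            #             if not tok or tok.type == 'SEMI':
            #                 break                   # ... up to a resync point
            #         parser.errok()                  # clear the error state
            #         return tok                      # becomes the new lookahead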
- if self.errorfunc: - if errtoken and not hasattr(errtoken, 'lexer'): - errtoken.lexer = lexer - self.state = state - tok = call_errorfunc(self.errorfunc, errtoken, self) - if self.errorok: - # User must have done some kind of panic - # mode recovery on their own. The - # returned token is the next lookahead - lookahead = tok - errtoken = None - continue - else: - if errtoken: - if hasattr(errtoken, 'lineno'): - lineno = lookahead.lineno - else: - lineno = 0 - if lineno: - sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type)) - else: - sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type) - else: - sys.stderr.write('yacc: Parse error in input. EOF\n') - return - - else: - errorcount = error_count - - # case 1: the statestack only has 1 entry on it. If we're in this state, the - # entire parse has been rolled back and we're completely hosed. The token is - # discarded and we just keep going. - - if len(statestack) <= 1 and lookahead.type != '$end': - lookahead = None - errtoken = None - state = 0 - # Nuke the pushback stack - del lookaheadstack[:] - continue - - # case 2: the statestack has a couple of entries on it, but we're - # at the end of the file. nuke the top entry and generate an error token - - # Start nuking entries on the stack - if lookahead.type == '$end': - # Whoa. We're really hosed here. Bail out - return - - if lookahead.type != 'error': - sym = symstack[-1] - if sym.type == 'error': - # Hmmm. Error is on top of stack, we'll just nuke input - # symbol and continue - #--! TRACKING - if tracking: - sym.endlineno = getattr(lookahead, 'lineno', sym.lineno) - sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos) - #--! TRACKING - lookahead = None - continue - - # Create the error symbol for the first time and make it the new lookahead symbol - t = YaccSymbol() - t.type = 'error' - - if hasattr(lookahead, 'lineno'): - t.lineno = t.endlineno = lookahead.lineno - if hasattr(lookahead, 'lexpos'): - t.lexpos = t.endlexpos = lookahead.lexpos - t.value = lookahead - lookaheadstack.append(lookahead) - lookahead = t - else: - sym = symstack.pop() - #--! TRACKING - if tracking: - lookahead.lineno = sym.lineno - lookahead.lexpos = sym.lexpos - #--! TRACKING - statestack.pop() - state = statestack[-1] - - continue - - # Call an error function here - raise RuntimeError('yacc: internal parser error!!!\n') - - #--! parsedebug-end - - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - # parseopt(). - # - # Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY! - # This code is automatically generated by the ply/ygen.py script. Make - # changes to the parsedebug() method instead. - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - def parseopt(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None): - #--! parseopt-start - lookahead = None # Current lookahead symbol - lookaheadstack = [] # Stack of lookahead symbols - actions = self.action # Local reference to action table (to avoid lookup on self.) - goto = self.goto # Local reference to goto table (to avoid lookup on self.) - prod = self.productions # Local reference to production list (to avoid lookup on self.) 
- defaulted_states = self.defaulted_states # Local reference to defaulted states - pslice = YaccProduction(None) # Production object passed to grammar rules - errorcount = 0 # Used during error recovery - - - # If no lexer was given, we will try to use the lex module - if not lexer: - from . import lex - lexer = lex.lexer - - # Set up the lexer and parser objects on pslice - pslice.lexer = lexer - pslice.parser = self - - # If input was supplied, pass to lexer - if input is not None: - lexer.input(input) - - if tokenfunc is None: - # Tokenize function - get_token = lexer.token - else: - get_token = tokenfunc - - # Set the parser() token method (sometimes used in error recovery) - self.token = get_token - - # Set up the state and symbol stacks - - statestack = [] # Stack of parsing states - self.statestack = statestack - symstack = [] # Stack of grammar symbols - self.symstack = symstack - - pslice.stack = symstack # Put in the production - errtoken = None # Err token - - # The start state is assumed to be (0,$end) - - statestack.append(0) - sym = YaccSymbol() - sym.type = '$end' - symstack.append(sym) - state = 0 - while True: - # Get the next symbol on the input. If a lookahead symbol - # is already set, we just use that. Otherwise, we'll pull - # the next token off of the lookaheadstack or from the lexer - - - if state not in defaulted_states: - if not lookahead: - if not lookaheadstack: - lookahead = get_token() # Get the next token - else: - lookahead = lookaheadstack.pop() - if not lookahead: - lookahead = YaccSymbol() - lookahead.type = '$end' - - # Check the action table - ltype = lookahead.type - t = actions[state].get(ltype) - else: - t = defaulted_states[state] - - - if t is not None: - if t > 0: - # shift a symbol on the stack - statestack.append(t) - state = t - - - symstack.append(lookahead) - lookahead = None - - # Decrease error count on successful shift - if errorcount: - errorcount -= 1 - continue - - if t < 0: - # reduce a symbol on the stack, emit a production - p = prod[-t] - pname = p.name - plen = p.len - - # Get production function - sym = YaccSymbol() - sym.type = pname # Production name - sym.value = None - - - if plen: - targ = symstack[-plen-1:] - targ[0] = sym - - #--! TRACKING - if tracking: - t1 = targ[1] - sym.lineno = t1.lineno - sym.lexpos = t1.lexpos - t1 = targ[-1] - sym.endlineno = getattr(t1, 'endlineno', t1.lineno) - sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos) - #--! TRACKING - - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - # The code enclosed in this section is duplicated - # below as a performance optimization. Make sure - # changes get made in both locations. - - pslice.slice = targ - - try: - # Call the grammar rule with our special slice object - del symstack[-plen:] - self.state = state - p.callable(pslice) - del statestack[-plen:] - symstack.append(sym) - state = goto[statestack[-1]][pname] - statestack.append(state) - except SyntaxError: - # If an error was set. Enter error recovery state - lookaheadstack.append(lookahead) # Save the current lookahead token - symstack.extend(targ[1:-1]) # Put the production slice back on the stack - statestack.pop() # Pop back one state (before the reduce) - state = statestack[-1] - sym.type = 'error' - sym.value = 'error' - lookahead = sym - errorcount = error_count - self.errorok = False - - continue - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - else: - - #--! TRACKING - if tracking: - sym.lineno = lexer.lineno - sym.lexpos = lexer.lexpos - #--! 
TRACKING - - targ = [sym] - - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - # The code enclosed in this section is duplicated - # above as a performance optimization. Make sure - # changes get made in both locations. - - pslice.slice = targ - - try: - # Call the grammar rule with our special slice object - self.state = state - p.callable(pslice) - symstack.append(sym) - state = goto[statestack[-1]][pname] - statestack.append(state) - except SyntaxError: - # If an error was set. Enter error recovery state - lookaheadstack.append(lookahead) # Save the current lookahead token - statestack.pop() # Pop back one state (before the reduce) - state = statestack[-1] - sym.type = 'error' - sym.value = 'error' - lookahead = sym - errorcount = error_count - self.errorok = False - - continue - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - if t == 0: - n = symstack[-1] - result = getattr(n, 'value', None) - return result - - if t is None: - - - # We have some kind of parsing error here. To handle - # this, we are going to push the current token onto - # the tokenstack and replace it with an 'error' token. - # If there are any synchronization rules, they may - # catch it. - # - # In addition to pushing the error token, we call call - # the user defined p_error() function if this is the - # first syntax error. This function is only called if - # errorcount == 0. - if errorcount == 0 or self.errorok: - errorcount = error_count - self.errorok = False - errtoken = lookahead - if errtoken.type == '$end': - errtoken = None # End of file! - if self.errorfunc: - if errtoken and not hasattr(errtoken, 'lexer'): - errtoken.lexer = lexer - self.state = state - tok = call_errorfunc(self.errorfunc, errtoken, self) - if self.errorok: - # User must have done some kind of panic - # mode recovery on their own. The - # returned token is the next lookahead - lookahead = tok - errtoken = None - continue - else: - if errtoken: - if hasattr(errtoken, 'lineno'): - lineno = lookahead.lineno - else: - lineno = 0 - if lineno: - sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type)) - else: - sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type) - else: - sys.stderr.write('yacc: Parse error in input. EOF\n') - return - - else: - errorcount = error_count - - # case 1: the statestack only has 1 entry on it. If we're in this state, the - # entire parse has been rolled back and we're completely hosed. The token is - # discarded and we just keep going. - - if len(statestack) <= 1 and lookahead.type != '$end': - lookahead = None - errtoken = None - state = 0 - # Nuke the pushback stack - del lookaheadstack[:] - continue - - # case 2: the statestack has a couple of entries on it, but we're - # at the end of the file. nuke the top entry and generate an error token - - # Start nuking entries on the stack - if lookahead.type == '$end': - # Whoa. We're really hosed here. Bail out - return - - if lookahead.type != 'error': - sym = symstack[-1] - if sym.type == 'error': - # Hmmm. Error is on top of stack, we'll just nuke input - # symbol and continue - #--! TRACKING - if tracking: - sym.endlineno = getattr(lookahead, 'lineno', sym.lineno) - sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos) - #--! 
TRACKING - lookahead = None - continue - - # Create the error symbol for the first time and make it the new lookahead symbol - t = YaccSymbol() - t.type = 'error' - - if hasattr(lookahead, 'lineno'): - t.lineno = t.endlineno = lookahead.lineno - if hasattr(lookahead, 'lexpos'): - t.lexpos = t.endlexpos = lookahead.lexpos - t.value = lookahead - lookaheadstack.append(lookahead) - lookahead = t - else: - sym = symstack.pop() - #--! TRACKING - if tracking: - lookahead.lineno = sym.lineno - lookahead.lexpos = sym.lexpos - #--! TRACKING - statestack.pop() - state = statestack[-1] - - continue - - # Call an error function here - raise RuntimeError('yacc: internal parser error!!!\n') - - #--! parseopt-end - - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - # parseopt_notrack(). - # - # Optimized version of parseopt() with line number tracking removed. - # DO NOT EDIT THIS CODE DIRECTLY. This code is automatically generated - # by the ply/ygen.py script. Make changes to the parsedebug() method instead. - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - def parseopt_notrack(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None): - #--! parseopt-notrack-start - lookahead = None # Current lookahead symbol - lookaheadstack = [] # Stack of lookahead symbols - actions = self.action # Local reference to action table (to avoid lookup on self.) - goto = self.goto # Local reference to goto table (to avoid lookup on self.) - prod = self.productions # Local reference to production list (to avoid lookup on self.) - defaulted_states = self.defaulted_states # Local reference to defaulted states - pslice = YaccProduction(None) # Production object passed to grammar rules - errorcount = 0 # Used during error recovery - - - # If no lexer was given, we will try to use the lex module - if not lexer: - from . import lex - lexer = lex.lexer - - # Set up the lexer and parser objects on pslice - pslice.lexer = lexer - pslice.parser = self - - # If input was supplied, pass to lexer - if input is not None: - lexer.input(input) - - if tokenfunc is None: - # Tokenize function - get_token = lexer.token - else: - get_token = tokenfunc - - # Set the parser() token method (sometimes used in error recovery) - self.token = get_token - - # Set up the state and symbol stacks - - statestack = [] # Stack of parsing states - self.statestack = statestack - symstack = [] # Stack of grammar symbols - self.symstack = symstack - - pslice.stack = symstack # Put in the production - errtoken = None # Err token - - # The start state is assumed to be (0,$end) - - statestack.append(0) - sym = YaccSymbol() - sym.type = '$end' - symstack.append(sym) - state = 0 - while True: - # Get the next symbol on the input. If a lookahead symbol - # is already set, we just use that. 
Otherwise, we'll pull - # the next token off of the lookaheadstack or from the lexer - - - if state not in defaulted_states: - if not lookahead: - if not lookaheadstack: - lookahead = get_token() # Get the next token - else: - lookahead = lookaheadstack.pop() - if not lookahead: - lookahead = YaccSymbol() - lookahead.type = '$end' - - # Check the action table - ltype = lookahead.type - t = actions[state].get(ltype) - else: - t = defaulted_states[state] - - - if t is not None: - if t > 0: - # shift a symbol on the stack - statestack.append(t) - state = t - - - symstack.append(lookahead) - lookahead = None - - # Decrease error count on successful shift - if errorcount: - errorcount -= 1 - continue - - if t < 0: - # reduce a symbol on the stack, emit a production - p = prod[-t] - pname = p.name - plen = p.len - - # Get production function - sym = YaccSymbol() - sym.type = pname # Production name - sym.value = None - - - if plen: - targ = symstack[-plen-1:] - targ[0] = sym - - - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - # The code enclosed in this section is duplicated - # below as a performance optimization. Make sure - # changes get made in both locations. - - pslice.slice = targ - - try: - # Call the grammar rule with our special slice object - del symstack[-plen:] - self.state = state - p.callable(pslice) - del statestack[-plen:] - symstack.append(sym) - state = goto[statestack[-1]][pname] - statestack.append(state) - except SyntaxError: - # If an error was set. Enter error recovery state - lookaheadstack.append(lookahead) # Save the current lookahead token - symstack.extend(targ[1:-1]) # Put the production slice back on the stack - statestack.pop() # Pop back one state (before the reduce) - state = statestack[-1] - sym.type = 'error' - sym.value = 'error' - lookahead = sym - errorcount = error_count - self.errorok = False - - continue - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - else: - - - targ = [sym] - - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - # The code enclosed in this section is duplicated - # above as a performance optimization. Make sure - # changes get made in both locations. - - pslice.slice = targ - - try: - # Call the grammar rule with our special slice object - self.state = state - p.callable(pslice) - symstack.append(sym) - state = goto[statestack[-1]][pname] - statestack.append(state) - except SyntaxError: - # If an error was set. Enter error recovery state - lookaheadstack.append(lookahead) # Save the current lookahead token - statestack.pop() # Pop back one state (before the reduce) - state = statestack[-1] - sym.type = 'error' - sym.value = 'error' - lookahead = sym - errorcount = error_count - self.errorok = False - - continue - # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - - if t == 0: - n = symstack[-1] - result = getattr(n, 'value', None) - return result - - if t is None: - - - # We have some kind of parsing error here. To handle - # this, we are going to push the current token onto - # the tokenstack and replace it with an 'error' token. - # If there are any synchronization rules, they may - # catch it. - # - # In addition to pushing the error token, we call call - # the user defined p_error() function if this is the - # first syntax error. This function is only called if - # errorcount == 0. - if errorcount == 0 or self.errorok: - errorcount = error_count - self.errorok = False - errtoken = lookahead - if errtoken.type == '$end': - errtoken = None # End of file! 
- if self.errorfunc: - if errtoken and not hasattr(errtoken, 'lexer'): - errtoken.lexer = lexer - self.state = state - tok = call_errorfunc(self.errorfunc, errtoken, self) - if self.errorok: - # User must have done some kind of panic - # mode recovery on their own. The - # returned token is the next lookahead - lookahead = tok - errtoken = None - continue - else: - if errtoken: - if hasattr(errtoken, 'lineno'): - lineno = lookahead.lineno - else: - lineno = 0 - if lineno: - sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type)) - else: - sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type) - else: - sys.stderr.write('yacc: Parse error in input. EOF\n') - return - - else: - errorcount = error_count - - # case 1: the statestack only has 1 entry on it. If we're in this state, the - # entire parse has been rolled back and we're completely hosed. The token is - # discarded and we just keep going. - - if len(statestack) <= 1 and lookahead.type != '$end': - lookahead = None - errtoken = None - state = 0 - # Nuke the pushback stack - del lookaheadstack[:] - continue - - # case 2: the statestack has a couple of entries on it, but we're - # at the end of the file. nuke the top entry and generate an error token - - # Start nuking entries on the stack - if lookahead.type == '$end': - # Whoa. We're really hosed here. Bail out - return - - if lookahead.type != 'error': - sym = symstack[-1] - if sym.type == 'error': - # Hmmm. Error is on top of stack, we'll just nuke input - # symbol and continue - lookahead = None - continue - - # Create the error symbol for the first time and make it the new lookahead symbol - t = YaccSymbol() - t.type = 'error' - - if hasattr(lookahead, 'lineno'): - t.lineno = t.endlineno = lookahead.lineno - if hasattr(lookahead, 'lexpos'): - t.lexpos = t.endlexpos = lookahead.lexpos - t.value = lookahead - lookaheadstack.append(lookahead) - lookahead = t - else: - sym = symstack.pop() - statestack.pop() - state = statestack[-1] - - continue - - # Call an error function here - raise RuntimeError('yacc: internal parser error!!!\n') - - #--! parseopt-notrack-end - -# ----------------------------------------------------------------------------- -# === Grammar Representation === -# -# The following functions, classes, and variables are used to represent and -# manipulate the rules that make up a grammar. -# ----------------------------------------------------------------------------- - -# regex matching identifiers -_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$') - -# ----------------------------------------------------------------------------- -# class Production: -# -# This class stores the raw information about a single production or grammar rule. -# A grammar rule refers to a specification such as this: -# -# expr : expr PLUS term -# -# Here are the basic attributes defined on all productions -# -# name - Name of the production. For example 'expr' -# prod - A list of symbols on the right side ['expr','PLUS','term'] -# prec - Production precedence level -# number - Production number. -# func - Function that executes on reduce -# file - File where production function is defined -# lineno - Line number where production function is defined -# -# The following attributes are defined or optional. 
-#
-#       len       - Length of the production (number of symbols on right hand side)
-#       usyms     - Set of unique symbols found in the production
-# -----------------------------------------------------------------------------
-
-class Production(object):
-    reduced = 0
-    def __init__(self, number, name, prod, precedence=('right', 0), func=None, file='', line=0):
-        self.name = name
-        self.prod = tuple(prod)
-        self.number = number
-        self.func = func
-        self.callable = None
-        self.file = file
-        self.line = line
-        self.prec = precedence
-
-        # Internal settings used during table construction
-
-        self.len = len(self.prod)   # Length of the production
-
-        # Create a list of unique production symbols used in the production
-        self.usyms = []
-        for s in self.prod:
-            if s not in self.usyms:
-                self.usyms.append(s)
-
-        # List of all LR items for the production
-        self.lr_items = []
-        self.lr_next = None
-
-        # Create a string representation
-        if self.prod:
-            self.str = '%s -> %s' % (self.name, ' '.join(self.prod))
-        else:
-            self.str = '%s -> <empty>' % self.name
-
-    def __str__(self):
-        return self.str
-
-    def __repr__(self):
-        return 'Production(' + str(self) + ')'
-
-    def __len__(self):
-        return len(self.prod)
-
-    def __nonzero__(self):
-        return 1
-
-    def __getitem__(self, index):
-        return self.prod[index]
-
-    # Return the nth lr_item from the production (or None if at the end)
-    def lr_item(self, n):
-        if n > len(self.prod):
-            return None
-        p = LRItem(self, n)
-        # Precompute the list of productions immediately following.
-        try:
-            p.lr_after = self.Prodnames[p.prod[n+1]]
-        except (IndexError, KeyError):
-            p.lr_after = []
-        try:
-            p.lr_before = p.prod[n-1]
-        except IndexError:
-            p.lr_before = None
-        return p
-
-    # Bind the production function name to a callable
-    def bind(self, pdict):
-        if self.func:
-            self.callable = pdict[self.func]
-
-# This class serves as a minimal standin for Production objects when
-# reading table data from files.  It only contains information
-# actually used by the LR parsing engine, plus some additional
-# debugging information.
-class MiniProduction(object):
-    def __init__(self, str, name, len, func, file, line):
-        self.name = name
-        self.len = len
-        self.func = func
-        self.callable = None
-        self.file = file
-        self.line = line
-        self.str = str
-
-    def __str__(self):
-        return self.str
-
-    def __repr__(self):
-        return 'MiniProduction(%s)' % self.str
-
-    # Bind the production function name to a callable
-    def bind(self, pdict):
-        if self.func:
-            self.callable = pdict[self.func]
-
-
-# -----------------------------------------------------------------------------
-# class LRItem
-#
-# This class represents a specific stage of parsing a production rule.  For
-# example:
-#
-#       expr : expr . PLUS term
-#
-# In the above, the "." represents the current location of the parse.  Here
-# are the basic attributes:
-#
-#       name        - Name of the production.  For example 'expr'
-#       prod        - A list of symbols on the right side ['expr', '.', 'PLUS', 'term']
-#       number      - Production number.
-#
-#       lr_next     - Next LR item.  For example, if we are 'expr -> expr . PLUS term'
-#                     then lr_next refers to 'expr -> expr PLUS . term'
-#       lr_index    - LR item index (location of the ".") in the prod list.
-#       lookaheads  - LALR lookahead symbols for this item
-#       len         - Length of the production (number of symbols on right hand side)
-#       lr_after    - List of all productions that immediately follow
-#       lr_before   - Grammar symbol immediately before
-# -----------------------------------------------------------------------------
-
-class LRItem(object):
-    def __init__(self, p, n):
-        self.name = p.name
-        self.prod = list(p.prod)
-        self.number = p.number
-        self.lr_index = n
-        self.lookaheads = {}
-        self.prod.insert(n, '.')
-        self.prod = tuple(self.prod)
-        self.len = len(self.prod)
-        self.usyms = p.usyms
-
-    def __str__(self):
-        if self.prod:
-            s = '%s -> %s' % (self.name, ' '.join(self.prod))
-        else:
-            s = '%s -> <empty>' % self.name
-        return s
-
-    def __repr__(self):
-        return 'LRItem(' + str(self) + ')'
-
-# -----------------------------------------------------------------------------
-# rightmost_terminal()
-#
-# Return the rightmost terminal from a list of symbols.  Used in add_production()
-# -----------------------------------------------------------------------------
-def rightmost_terminal(symbols, terminals):
-    i = len(symbols) - 1
-    while i >= 0:
-        if symbols[i] in terminals:
-            return symbols[i]
-        i -= 1
-    return None
-
-# -----------------------------------------------------------------------------
-#                           === GRAMMAR CLASS ===
-#
-# The following class represents the contents of the specified grammar along
-# with various computed properties such as first sets, follow sets, LR items, etc.
-# This data is used for critical parts of the table generation process later.
-# -----------------------------------------------------------------------------
-
-class GrammarError(YaccError):
-    pass
-
-class Grammar(object):
-    def __init__(self, terminals):
-        self.Productions = [None]  # A list of all of the productions.  The first
-                                   # entry is always reserved for the purpose of
-                                   # building an augmented grammar
-
-        self.Prodnames = {}        # A dictionary mapping the names of nonterminals to a list of all
-                                   # productions of that nonterminal.
-
-        self.Prodmap = {}          # A dictionary that is only used to detect duplicate
-                                   # productions.
-
-        self.Terminals = {}        # A dictionary mapping the names of terminal symbols to a
-                                   # list of the rules where they are used.
-
-        for term in terminals:
-            self.Terminals[term] = []
-
-        self.Terminals['error'] = []
-
-        self.Nonterminals = {}     # A dictionary mapping names of nonterminals to a list
-                                   # of rule numbers where they are used.
-
-        self.First = {}            # A dictionary of precomputed FIRST(x) symbols
-
-        self.Follow = {}           # A dictionary of precomputed FOLLOW(x) symbols
-
-        self.Precedence = {}       # Precedence rules for each terminal.  Contains tuples of the
-                                   # form ('right', level) or ('nonassoc', level) or ('left', level)
-
-        self.UsedPrecedence = set() # Precedence rules that were actually used by the grammar.
-                                    # This is only used to provide error checking and to generate
-                                    # a warning about unused precedence rules.
-
-        self.Start = None          # Starting symbol for the grammar
-
-
-    def __len__(self):
-        return len(self.Productions)
-
-    def __getitem__(self, index):
-        return self.Productions[index]
-
-    # -----------------------------------------------------------------------------
-    # set_precedence()
-    #
-    # Sets the precedence for a given terminal.  assoc is the associativity such as
-    # 'left', 'right', or 'nonassoc'.  level is a numeric level.
- # - # ----------------------------------------------------------------------------- - - def set_precedence(self, term, assoc, level): - assert self.Productions == [None], 'Must call set_precedence() before add_production()' - if term in self.Precedence: - raise GrammarError('Precedence already specified for terminal %r' % term) - if assoc not in ['left', 'right', 'nonassoc']: - raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'") - self.Precedence[term] = (assoc, level) - - # ----------------------------------------------------------------------------- - # add_production() - # - # Given an action function, this function assembles a production rule and - # computes its precedence level. - # - # The production rule is supplied as a list of symbols. For example, - # a rule such as 'expr : expr PLUS term' has a production name of 'expr' and - # symbols ['expr','PLUS','term']. - # - # Precedence is determined by the precedence of the right-most non-terminal - # or the precedence of a terminal specified by %prec. - # - # A variety of error checks are performed to make sure production symbols - # are valid and that %prec is used correctly. - # ----------------------------------------------------------------------------- - - def add_production(self, prodname, syms, func=None, file='', line=0): - - if prodname in self.Terminals: - raise GrammarError('%s:%d: Illegal rule name %r. Already defined as a token' % (file, line, prodname)) - if prodname == 'error': - raise GrammarError('%s:%d: Illegal rule name %r. error is a reserved word' % (file, line, prodname)) - if not _is_identifier.match(prodname): - raise GrammarError('%s:%d: Illegal rule name %r' % (file, line, prodname)) - - # Look for literal tokens - for n, s in enumerate(syms): - if s[0] in "'\"": - try: - c = eval(s) - if (len(c) > 1): - raise GrammarError('%s:%d: Literal token %s in rule %r may only be a single character' % - (file, line, s, prodname)) - if c not in self.Terminals: - self.Terminals[c] = [] - syms[n] = c - continue - except SyntaxError: - pass - if not _is_identifier.match(s) and s != '%prec': - raise GrammarError('%s:%d: Illegal name %r in rule %r' % (file, line, s, prodname)) - - # Determine the precedence level - if '%prec' in syms: - if syms[-1] == '%prec': - raise GrammarError('%s:%d: Syntax error. Nothing follows %%prec' % (file, line)) - if syms[-2] != '%prec': - raise GrammarError('%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule' % - (file, line)) - precname = syms[-1] - prodprec = self.Precedence.get(precname) - if not prodprec: - raise GrammarError('%s:%d: Nothing known about the precedence of %r' % (file, line, precname)) - else: - self.UsedPrecedence.add(precname) - del syms[-2:] # Drop %prec from the rule - else: - # If no %prec, precedence is determined by the rightmost terminal symbol - precname = rightmost_terminal(syms, self.Terminals) - prodprec = self.Precedence.get(precname, ('right', 0)) - - # See if the rule is already in the rulemap - map = '%s -> %s' % (prodname, syms) - if map in self.Prodmap: - m = self.Prodmap[map] - raise GrammarError('%s:%d: Duplicate rule %s. ' % (file, line, m) + - 'Previous definition at %s:%d' % (m.file, m.line)) - - # From this point on, everything is valid. 
Create a new Production instance - pnumber = len(self.Productions) - if prodname not in self.Nonterminals: - self.Nonterminals[prodname] = [] - - # Add the production number to Terminals and Nonterminals - for t in syms: - if t in self.Terminals: - self.Terminals[t].append(pnumber) - else: - if t not in self.Nonterminals: - self.Nonterminals[t] = [] - self.Nonterminals[t].append(pnumber) - - # Create a production and add it to the list of productions - p = Production(pnumber, prodname, syms, prodprec, func, file, line) - self.Productions.append(p) - self.Prodmap[map] = p - - # Add to the global productions list - try: - self.Prodnames[prodname].append(p) - except KeyError: - self.Prodnames[prodname] = [p] - - # ----------------------------------------------------------------------------- - # set_start() - # - # Sets the starting symbol and creates the augmented grammar. Production - # rule 0 is S' -> start where start is the start symbol. - # ----------------------------------------------------------------------------- - - def set_start(self, start=None): - if not start: - start = self.Productions[1].name - if start not in self.Nonterminals: - raise GrammarError('start symbol %s undefined' % start) - self.Productions[0] = Production(0, "S'", [start]) - self.Nonterminals[start].append(0) - self.Start = start - - # ----------------------------------------------------------------------------- - # find_unreachable() - # - # Find all of the nonterminal symbols that can't be reached from the starting - # symbol. Returns a list of nonterminals that can't be reached. - # ----------------------------------------------------------------------------- - - def find_unreachable(self): - - # Mark all symbols that are reachable from a symbol s - def mark_reachable_from(s): - if s in reachable: - return - reachable.add(s) - for p in self.Prodnames.get(s, []): - for r in p.prod: - mark_reachable_from(r) - - reachable = set() - mark_reachable_from(self.Productions[0].prod[0]) - return [s for s in self.Nonterminals if s not in reachable] - - # ----------------------------------------------------------------------------- - # infinite_cycles() - # - # This function looks at the various parsing rules and tries to detect - # infinite recursion cycles (grammar rules where there is no possible way - # to derive a string of only terminals). - # ----------------------------------------------------------------------------- - - def infinite_cycles(self): - terminates = {} - - # Terminals: - for t in self.Terminals: - terminates[t] = True - - terminates['$end'] = True - - # Nonterminals: - - # Initialize to false: - for n in self.Nonterminals: - terminates[n] = False - - # Then propagate termination until no change: - while True: - some_change = False - for (n, pl) in self.Prodnames.items(): - # Nonterminal n terminates iff any of its productions terminates. - for p in pl: - # Production p terminates iff all of its rhs symbols terminate. - for s in p.prod: - if not terminates[s]: - # The symbol s does not terminate, - # so production p does not terminate. - p_terminates = False - break - else: - # didn't break from the loop, - # so every symbol s terminates - # so production p terminates. - p_terminates = True - - if p_terminates: - # symbol n terminates! - if not terminates[n]: - terminates[n] = True - some_change = True - # Don't need to consider any more productions for this n. 
-                        break
-
-            if not some_change:
-                break
-
-        infinite = []
-        for (s, term) in terminates.items():
-            if not term:
-                if s not in self.Prodnames and s not in self.Terminals and s != 'error':
-                    # s is used-but-not-defined, and we've already warned of that,
-                    # so it would be overkill to say that it's also non-terminating.
-                    pass
-                else:
-                    infinite.append(s)
-
-        return infinite
-
-    # -----------------------------------------------------------------------------
-    # undefined_symbols()
-    #
-    # Find all symbols that were used in the grammar, but not defined as tokens or
-    # grammar rules.  Returns a list of tuples (sym, prod) where sym is the symbol
-    # and prod is the production where the symbol was used.
-    # -----------------------------------------------------------------------------
-    def undefined_symbols(self):
-        result = []
-        for p in self.Productions:
-            if not p:
-                continue
-
-            for s in p.prod:
-                if s not in self.Prodnames and s not in self.Terminals and s != 'error':
-                    result.append((s, p))
-        return result
-
-    # -----------------------------------------------------------------------------
-    # unused_terminals()
-    #
-    # Find all terminals that were defined, but not used by the grammar.  Returns
-    # a list of all symbols.
-    # -----------------------------------------------------------------------------
-    def unused_terminals(self):
-        unused_tok = []
-        for s, v in self.Terminals.items():
-            if s != 'error' and not v:
-                unused_tok.append(s)
-
-        return unused_tok
-
-    # ------------------------------------------------------------------------------
-    # unused_rules()
-    #
-    # Find all grammar rules that were defined, but not used (maybe not reachable)
-    # Returns a list of productions.
-    # ------------------------------------------------------------------------------
-    def unused_rules(self):
-        unused_prod = []
-        for s, v in self.Nonterminals.items():
-            if not v:
-                p = self.Prodnames[s][0]
-                unused_prod.append(p)
-        return unused_prod
-
-    # -----------------------------------------------------------------------------
-    # unused_precedence()
-    #
-    # Returns a list of tuples (term, precedence) corresponding to precedence
-    # rules that were never used by the grammar.  term is the name of the terminal
-    # on which precedence was applied and precedence is a string such as 'left' or
-    # 'right' corresponding to the type of precedence.
-    # -----------------------------------------------------------------------------
-    def unused_precedence(self):
-        unused = []
-        for termname in self.Precedence:
-            if not (termname in self.Terminals or termname in self.UsedPrecedence):
-                unused.append((termname, self.Precedence[termname][0]))
-
-        return unused
-
-    # -------------------------------------------------------------------------
-    # _first()
-    #
-    # Compute the value of FIRST1(beta) where beta is a tuple of symbols.
-    #
-    # During execution of compute_first1, the result may be incomplete.
-    # Afterward (e.g., when called from compute_follow()), it will be complete.
-    # -------------------------------------------------------------------------
-    def _first(self, beta):
-
-        # We are computing First(x1,x2,x3,...,xn)
-        result = []
-        for x in beta:
-            x_produces_empty = False
-
-            # Add all the non-<empty> symbols of First[x] to the result.
-            for f in self.First[x]:
-                if f == '<empty>':
-                    x_produces_empty = True
-                else:
-                    if f not in result:
-                        result.append(f)
-
-            if x_produces_empty:
-                # We have to consider the next x in beta,
-                # i.e. stay in the loop.
-                pass
-            else:
-                # We don't have to consider any further symbols in beta.
-                break
-        else:
-            # There was no 'break' from the loop,
-            # so x_produces_empty was true for all x in beta,
-            # so beta produces empty as well.
-            result.append('<empty>')
-
-        return result
-
-    # -------------------------------------------------------------------------
-    # compute_first()
-    #
-    # Compute the value of FIRST1(X) for all symbols
-    # -------------------------------------------------------------------------
-    def compute_first(self):
-        if self.First:
-            return self.First
-
-        # Terminals:
-        for t in self.Terminals:
-            self.First[t] = [t]
-
-        self.First['$end'] = ['$end']
-
-        # Nonterminals:
-
-        # Initialize to the empty set:
-        for n in self.Nonterminals:
-            self.First[n] = []
-
-        # Then propagate symbols until no change:
-        while True:
-            some_change = False
-            for n in self.Nonterminals:
-                for p in self.Prodnames[n]:
-                    for f in self._first(p.prod):
-                        if f not in self.First[n]:
-                            self.First[n].append(f)
-                            some_change = True
-            if not some_change:
-                break
-
-        return self.First
-
-    # ---------------------------------------------------------------------
-    # compute_follow()
-    #
-    # Computes all of the follow sets for every non-terminal symbol.  The
-    # follow set is the set of all symbols that might follow a given
-    # non-terminal.  See the Dragon book, 2nd Ed. p. 189.
-    # ---------------------------------------------------------------------
-    def compute_follow(self, start=None):
-        # If already computed, return the result
-        if self.Follow:
-            return self.Follow
-
-        # If first sets not computed yet, do that first.
-        if not self.First:
-            self.compute_first()
-
-        # Add '$end' to the follow list of the start symbol
-        for k in self.Nonterminals:
-            self.Follow[k] = []
-
-        if not start:
-            start = self.Productions[1].name
-
-        self.Follow[start] = ['$end']
-
-        while True:
-            didadd = False
-            for p in self.Productions[1:]:
-                # Here is the production set
-                for i, B in enumerate(p.prod):
-                    if B in self.Nonterminals:
-                        # Okay. We got a non-terminal in a production
-                        fst = self._first(p.prod[i+1:])
-                        hasempty = False
-                        for f in fst:
-                            if f != '<empty>' and f not in self.Follow[B]:
-                                self.Follow[B].append(f)
-                                didadd = True
-                            if f == '<empty>':
-                                hasempty = True
-                        if hasempty or i == (len(p.prod)-1):
-                            # Add elements of follow(a) to follow(b)
-                            for f in self.Follow[p.name]:
-                                if f not in self.Follow[B]:
-                                    self.Follow[B].append(f)
-                                    didadd = True
-            if not didadd:
-                break
-        return self.Follow
-
-
-    # -----------------------------------------------------------------------------
-    # build_lritems()
-    #
-    # This function walks the list of productions and builds a complete set of the
-    # LR items.  The LR items are stored in two ways:  First, they are uniquely
-    # numbered and placed in the list _lritems.  Second, a linked list of LR items
-    # is built for each production.  For example:
-    #
-    #   E -> E PLUS E
-    #
-    # Creates the list
-    #
-    #   [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . 
] - # ----------------------------------------------------------------------------- - - def build_lritems(self): - for p in self.Productions: - lastlri = p - i = 0 - lr_items = [] - while True: - if i > len(p): - lri = None - else: - lri = LRItem(p, i) - # Precompute the list of productions immediately following - try: - lri.lr_after = self.Prodnames[lri.prod[i+1]] - except (IndexError, KeyError): - lri.lr_after = [] - try: - lri.lr_before = lri.prod[i-1] - except IndexError: - lri.lr_before = None - - lastlri.lr_next = lri - if not lri: - break - lr_items.append(lri) - lastlri = lri - i += 1 - p.lr_items = lr_items - -# ----------------------------------------------------------------------------- -# == Class LRTable == -# -# This basic class represents a basic table of LR parsing information. -# Methods for generating the tables are not defined here. They are defined -# in the derived class LRGeneratedTable. -# ----------------------------------------------------------------------------- - -class VersionError(YaccError): - pass - -class LRTable(object): - def __init__(self): - self.lr_action = None - self.lr_goto = None - self.lr_productions = None - self.lr_method = None - - def read_table(self, module): - if isinstance(module, types.ModuleType): - parsetab = module - else: - exec('import %s' % module) - parsetab = sys.modules[module] - - if parsetab._tabversion != __tabversion__: - raise VersionError('yacc table file version is out of date') - - self.lr_action = parsetab._lr_action - self.lr_goto = parsetab._lr_goto - - self.lr_productions = [] - for p in parsetab._lr_productions: - self.lr_productions.append(MiniProduction(*p)) - - self.lr_method = parsetab._lr_method - return parsetab._lr_signature - - def read_pickle(self, filename): - try: - import cPickle as pickle - except ImportError: - import pickle - - if not os.path.exists(filename): - raise ImportError - - in_f = open(filename, 'rb') - - tabversion = pickle.load(in_f) - if tabversion != __tabversion__: - raise VersionError('yacc table file version is out of date') - self.lr_method = pickle.load(in_f) - signature = pickle.load(in_f) - self.lr_action = pickle.load(in_f) - self.lr_goto = pickle.load(in_f) - productions = pickle.load(in_f) - - self.lr_productions = [] - for p in productions: - self.lr_productions.append(MiniProduction(*p)) - - in_f.close() - return signature - - # Bind all production function names to callable objects in pdict - def bind_callables(self, pdict): - for p in self.lr_productions: - p.bind(pdict) - - -# ----------------------------------------------------------------------------- -# === LR Generator === -# -# The following classes and functions are used to generate LR parsing tables on -# a grammar. -# ----------------------------------------------------------------------------- - -# ----------------------------------------------------------------------------- -# digraph() -# traverse() -# -# The following two functions are used to compute set valued functions -# of the form: -# -# F(x) = F'(x) U U{F(y) | x R y} -# -# This is used to compute the values of Read() sets as well as FOLLOW sets -# in LALR(1) generation. 
-# -# Inputs: X - An input set -# R - A relation -# FP - Set-valued function -# ------------------------------------------------------------------------------ - -def digraph(X, R, FP): - N = {} - for x in X: - N[x] = 0 - stack = [] - F = {} - for x in X: - if N[x] == 0: - traverse(x, N, stack, F, X, R, FP) - return F - -def traverse(x, N, stack, F, X, R, FP): - stack.append(x) - d = len(stack) - N[x] = d - F[x] = FP(x) # F(X) <- F'(x) - - rel = R(x) # Get y's related to x - for y in rel: - if N[y] == 0: - traverse(y, N, stack, F, X, R, FP) - N[x] = min(N[x], N[y]) - for a in F.get(y, []): - if a not in F[x]: - F[x].append(a) - if N[x] == d: - N[stack[-1]] = MAXINT - F[stack[-1]] = F[x] - element = stack.pop() - while element != x: - N[stack[-1]] = MAXINT - F[stack[-1]] = F[x] - element = stack.pop() - -class LALRError(YaccError): - pass - -# ----------------------------------------------------------------------------- -# == LRGeneratedTable == -# -# This class implements the LR table generation algorithm. There are no -# public methods except for write() -# ----------------------------------------------------------------------------- - -class LRGeneratedTable(LRTable): - def __init__(self, grammar, method='LALR', log=None): - if method not in ['SLR', 'LALR']: - raise LALRError('Unsupported method %s' % method) - - self.grammar = grammar - self.lr_method = method - - # Set up the logger - if not log: - log = NullLogger() - self.log = log - - # Internal attributes - self.lr_action = {} # Action table - self.lr_goto = {} # Goto table - self.lr_productions = grammar.Productions # Copy of grammar Production array - self.lr_goto_cache = {} # Cache of computed gotos - self.lr0_cidhash = {} # Cache of closures - - self._add_count = 0 # Internal counter used to detect cycles - - # Diagonistic information filled in by the table generator - self.sr_conflict = 0 - self.rr_conflict = 0 - self.conflicts = [] # List of conflicts - - self.sr_conflicts = [] - self.rr_conflicts = [] - - # Build the tables - self.grammar.build_lritems() - self.grammar.compute_first() - self.grammar.compute_follow() - self.lr_parse_table() - - # Compute the LR(0) closure operation on I, where I is a set of LR(0) items. - - def lr0_closure(self, I): - self._add_count += 1 - - # Add everything in I to J - J = I[:] - didadd = True - while didadd: - didadd = False - for j in J: - for x in j.lr_after: - if getattr(x, 'lr0_added', 0) == self._add_count: - continue - # Add B --> .G to J - J.append(x.lr_next) - x.lr0_added = self._add_count - didadd = True - - return J - - # Compute the LR(0) goto function goto(I,X) where I is a set - # of LR(0) items and X is a grammar symbol. This function is written - # in a way that guarantees uniqueness of the generated goto sets - # (i.e. the same goto set will never be returned as two different Python - # objects). With uniqueness, we can later do fast set comparisons using - # id(obj) instead of element-wise comparison. 
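Concretely, the uniqueness guarantee means set equality degenerates to object identity. A small sketch, assuming a constructed table `tbl` and an LR(0) item set `I` taken from it (both names illustrative):

    g1 = tbl.lr0_goto(I, 'PLUS')
    g2 = tbl.lr0_goto(I, 'PLUS')
    assert g1 is g2    # cached: the same list object is returned, so
                       # id(g1) == id(g2) can stand in for an element-wise
                       # comparison, which is what lr0_cidhash relies on below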
- - def lr0_goto(self, I, x): - # First we look for a previously cached entry - g = self.lr_goto_cache.get((id(I), x)) - if g: - return g - - # Now we generate the goto set in a way that guarantees uniqueness - # of the result - - s = self.lr_goto_cache.get(x) - if not s: - s = {} - self.lr_goto_cache[x] = s - - gs = [] - for p in I: - n = p.lr_next - if n and n.lr_before == x: - s1 = s.get(id(n)) - if not s1: - s1 = {} - s[id(n)] = s1 - gs.append(n) - s = s1 - g = s.get('$end') - if not g: - if gs: - g = self.lr0_closure(gs) - s['$end'] = g - else: - s['$end'] = gs - self.lr_goto_cache[(id(I), x)] = g - return g - - # Compute the LR(0) sets of item function - def lr0_items(self): - C = [self.lr0_closure([self.grammar.Productions[0].lr_next])] - i = 0 - for I in C: - self.lr0_cidhash[id(I)] = i - i += 1 - - # Loop over the items in C and each grammar symbols - i = 0 - while i < len(C): - I = C[i] - i += 1 - - # Collect all of the symbols that could possibly be in the goto(I,X) sets - asyms = {} - for ii in I: - for s in ii.usyms: - asyms[s] = None - - for x in asyms: - g = self.lr0_goto(I, x) - if not g or id(g) in self.lr0_cidhash: - continue - self.lr0_cidhash[id(g)] = len(C) - C.append(g) - - return C - - # ----------------------------------------------------------------------------- - # ==== LALR(1) Parsing ==== - # - # LALR(1) parsing is almost exactly the same as SLR except that instead of - # relying upon Follow() sets when performing reductions, a more selective - # lookahead set that incorporates the state of the LR(0) machine is utilized. - # Thus, we mainly just have to focus on calculating the lookahead sets. - # - # The method used here is due to DeRemer and Pennelo (1982). - # - # DeRemer, F. L., and T. J. Pennelo: "Efficient Computation of LALR(1) - # Lookahead Sets", ACM Transactions on Programming Languages and Systems, - # Vol. 4, No. 4, Oct. 1982, pp. 615-649 - # - # Further details can also be found in: - # - # J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing", - # McGraw-Hill Book Company, (1985). - # - # ----------------------------------------------------------------------------- - - # ----------------------------------------------------------------------------- - # compute_nullable_nonterminals() - # - # Creates a dictionary containing all of the non-terminals that might produce - # an empty production. - # ----------------------------------------------------------------------------- - - def compute_nullable_nonterminals(self): - nullable = set() - num_nullable = 0 - while True: - for p in self.grammar.Productions[1:]: - if p.len == 0: - nullable.add(p.name) - continue - for t in p.prod: - if t not in nullable: - break - else: - nullable.add(p.name) - if len(nullable) == num_nullable: - break - num_nullable = len(nullable) - return nullable - - # ----------------------------------------------------------------------------- - # find_nonterminal_trans(C) - # - # Given a set of LR(0) items, this functions finds all of the non-terminal - # transitions. These are transitions in which a dot appears immediately before - # a non-terminal. Returns a list of tuples of the form (state,N) where state - # is the state number and N is the nonterminal symbol. - # - # The input C is the set of LR(0) items. 
- # ----------------------------------------------------------------------------- - - def find_nonterminal_transitions(self, C): - trans = [] - for stateno, state in enumerate(C): - for p in state: - if p.lr_index < p.len - 1: - t = (stateno, p.prod[p.lr_index+1]) - if t[1] in self.grammar.Nonterminals: - if t not in trans: - trans.append(t) - return trans - - # ----------------------------------------------------------------------------- - # dr_relation() - # - # Computes the DR(p,A) relationships for non-terminal transitions. The input - # is a tuple (state,N) where state is a number and N is a nonterminal symbol. - # - # Returns a list of terminals. - # ----------------------------------------------------------------------------- - - def dr_relation(self, C, trans, nullable): - state, N = trans - terms = [] - - g = self.lr0_goto(C[state], N) - for p in g: - if p.lr_index < p.len - 1: - a = p.prod[p.lr_index+1] - if a in self.grammar.Terminals: - if a not in terms: - terms.append(a) - - # This extra bit is to handle the start state - if state == 0 and N == self.grammar.Productions[0].prod[0]: - terms.append('$end') - - return terms - - # ----------------------------------------------------------------------------- - # reads_relation() - # - # Computes the READS() relation (p,A) READS (t,C). - # ----------------------------------------------------------------------------- - - def reads_relation(self, C, trans, empty): - # Look for empty transitions - rel = [] - state, N = trans - - g = self.lr0_goto(C[state], N) - j = self.lr0_cidhash.get(id(g), -1) - for p in g: - if p.lr_index < p.len - 1: - a = p.prod[p.lr_index + 1] - if a in empty: - rel.append((j, a)) - - return rel - - # ----------------------------------------------------------------------------- - # compute_lookback_includes() - # - # Determines the lookback and includes relations - # - # LOOKBACK: - # - # This relation is determined by running the LR(0) state machine forward. - # For example, starting with a production "N : . A B C", we run it forward - # to obtain "N : A B C ." We then build a relationship between this final - # state and the starting state. These relationships are stored in a dictionary - # lookdict. - # - # INCLUDES: - # - # Computes the INCLUDE() relation (p,A) INCLUDES (p',B). - # - # This relation is used to determine non-terminal transitions that occur - # inside of other non-terminal transition states. (p,A) INCLUDES (p', B) - # if the following holds: - # - # B -> LAT, where T -> epsilon and p' -L-> p - # - # L is essentially a prefix (which may be empty), T is a suffix that must be - # able to derive an empty string. State p' must lead to state p with the string L. - # - # ----------------------------------------------------------------------------- - - def compute_lookback_includes(self, C, trans, nullable): - lookdict = {} # Dictionary of lookback relations - includedict = {} # Dictionary of include relations - - # Make a dictionary of non-terminal transitions - dtrans = {} - for t in trans: - dtrans[t] = 1 - - # Loop over all transitions and compute lookbacks and includes - for state, N in trans: - lookb = [] - includes = [] - for p in C[state]: - if p.name != N: - continue - - # Okay, we have a name match. We now follow the production all the way - # through the state machine until we get the . 
on the right hand side
-
-                lr_index = p.lr_index
-                j = state
-                while lr_index < p.len - 1:
-                    lr_index = lr_index + 1
-                    t = p.prod[lr_index]
-
-                    # Check to see if this symbol and state are a non-terminal transition
-                    if (j, t) in dtrans:
-                        # Yes. Okay, there is some chance that this is an includes relation;
-                        # the only way to know for certain is whether the rest of the
-                        # production derives empty
-
-                        li = lr_index + 1
-                        while li < p.len:
-                            if p.prod[li] in self.grammar.Terminals:
-                                break      # No, forget it
-                            if p.prod[li] not in nullable:
-                                break
-                            li = li + 1
-                        else:
-                            # Appears to be a relation between (j,t) and (state,N)
-                            includes.append((j, t))
-
-                    g = self.lr0_goto(C[j], t)               # Go to next set
-                    j = self.lr0_cidhash.get(id(g), -1)      # Go to next state
-
-                # When we get here, j is the final state, now we have to locate the production
-                for r in C[j]:
-                    if r.name != p.name:
-                        continue
-                    if r.len != p.len:
-                        continue
-                    i = 0
-                    # This loop is comparing a production ". A B C" with "A B C ."
-                    while i < r.lr_index:
-                        if r.prod[i] != p.prod[i+1]:
-                            break
-                        i = i + 1
-                    else:
-                        lookb.append((j, r))
-            for i in includes:
-                if i not in includedict:
-                    includedict[i] = []
-                includedict[i].append((state, N))
-            lookdict[(state, N)] = lookb
-
-        return lookdict, includedict
-
-    # -----------------------------------------------------------------------------
-    # compute_read_sets()
-    #
-    # Given a set of LR(0) items, this function computes the read sets.
-    #
-    # Inputs:  C        = Set of LR(0) items
-    #          ntrans   = Set of nonterminal transitions
-    #          nullable = Set of nullable non-terminals
-    #
-    # Returns a set containing the read sets
-    # -----------------------------------------------------------------------------
-
-    def compute_read_sets(self, C, ntrans, nullable):
-        FP = lambda x: self.dr_relation(C, x, nullable)
-        R = lambda x: self.reads_relation(C, x, nullable)
-        F = digraph(ntrans, R, FP)
-        return F
-
-    # -----------------------------------------------------------------------------
-    # compute_follow_sets()
-    #
-    # Given a set of LR(0) items, a set of non-terminal transitions, a readset,
-    # and an include set, this function computes the follow sets
-    #
-    # Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
-    #
-    # Inputs:
-    #          ntrans   = Set of nonterminal transitions
-    #          readsets = Readset (previously computed)
-    #          inclsets = Include sets (previously computed)
-    #
-    # Returns a set containing the follow sets
-    # -----------------------------------------------------------------------------
-
-    def compute_follow_sets(self, ntrans, readsets, inclsets):
-        FP = lambda x: readsets[x]
-        R = lambda x: inclsets.get(x, [])
-        F = digraph(ntrans, R, FP)
-        return F
-
-    # -----------------------------------------------------------------------------
-    # add_lookaheads()
-    #
-    # Attaches the lookahead symbols to grammar rules.
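-    #
-    # For example (editor's sketch, values hypothetical): if the lookbacks for
-    # the transition (3, 'expr') contain (7, p) with p = expr -> expr PLUS term,
-    # and the computed follow set for (3, 'expr') is {'$end', 'RPAREN'}, then
-    # both symbols are appended to p.lookaheads[7], so state 7 reduces by p
-    # only on those tokens.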
- # - # Inputs: lookbacks - Set of lookback relations - # followset - Computed follow set - # - # This function directly attaches the lookaheads to productions contained - # in the lookbacks set - # ----------------------------------------------------------------------------- - - def add_lookaheads(self, lookbacks, followset): - for trans, lb in lookbacks.items(): - # Loop over productions in lookback - for state, p in lb: - if state not in p.lookaheads: - p.lookaheads[state] = [] - f = followset.get(trans, []) - for a in f: - if a not in p.lookaheads[state]: - p.lookaheads[state].append(a) - - # ----------------------------------------------------------------------------- - # add_lalr_lookaheads() - # - # This function does all of the work of adding lookahead information for use - # with LALR parsing - # ----------------------------------------------------------------------------- - - def add_lalr_lookaheads(self, C): - # Determine all of the nullable nonterminals - nullable = self.compute_nullable_nonterminals() - - # Find all non-terminal transitions - trans = self.find_nonterminal_transitions(C) - - # Compute read sets - readsets = self.compute_read_sets(C, trans, nullable) - - # Compute lookback/includes relations - lookd, included = self.compute_lookback_includes(C, trans, nullable) - - # Compute LALR FOLLOW sets - followsets = self.compute_follow_sets(trans, readsets, included) - - # Add all of the lookaheads - self.add_lookaheads(lookd, followsets) - - # ----------------------------------------------------------------------------- - # lr_parse_table() - # - # This function constructs the parse tables for SLR or LALR - # ----------------------------------------------------------------------------- - def lr_parse_table(self): - Productions = self.grammar.Productions - Precedence = self.grammar.Precedence - goto = self.lr_goto # Goto array - action = self.lr_action # Action array - log = self.log # Logger for output - - actionp = {} # Action production array (temporary) - - log.info('Parsing method: %s', self.lr_method) - - # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items - # This determines the number of states - - C = self.lr0_items() - - if self.lr_method == 'LALR': - self.add_lalr_lookaheads(C) - - # Build the parser table, state by state - st = 0 - for I in C: - # Loop over each production in I - actlist = [] # List of actions - st_action = {} - st_actionp = {} - st_goto = {} - log.info('') - log.info('state %d', st) - log.info('') - for p in I: - log.info(' (%d) %s', p.number, p) - log.info('') - - for p in I: - if p.len == p.lr_index + 1: - if p.name == "S'": - # Start symbol. Accept! - st_action['$end'] = 0 - st_actionp['$end'] = p - else: - # We are at the end of a production. Reduce! - if self.lr_method == 'LALR': - laheads = p.lookaheads[st] - else: - laheads = self.grammar.Follow[p.name] - for a in laheads: - actlist.append((a, p, 'reduce using rule %d (%s)' % (p.number, p))) - r = st_action.get(a) - if r is not None: - # Whoa. Have a shift/reduce or reduce/reduce conflict - if r > 0: - # Need to decide on shift or reduce here - # By default we favor shifting. Need to add - # some precedence rules here. - - # Shift precedence comes from the token - sprec, slevel = Precedence.get(a, ('right', 0)) - - # Reduce precedence comes from rule being reduced (p) - rprec, rlevel = Productions[p.number].prec - - if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')): - # We really need to reduce here. 
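- # (Editor's note, not in the original source: table actions are encoded as
- # integers -- a negative entry -p.number means "reduce by production p", a
- # positive entry j means "shift and go to state j", 0 means "accept", and a
- # None entry marks an error slot created by nonassoc precedence.)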
- st_action[a] = -p.number - st_actionp[a] = p - if not slevel and not rlevel: - log.info(' ! shift/reduce conflict for %s resolved as reduce', a) - self.sr_conflicts.append((st, a, 'reduce')) - Productions[p.number].reduced += 1 - elif (slevel == rlevel) and (rprec == 'nonassoc'): - st_action[a] = None - else: - # Hmmm. Guess we'll keep the shift - if not rlevel: - log.info(' ! shift/reduce conflict for %s resolved as shift', a) - self.sr_conflicts.append((st, a, 'shift')) - elif r < 0: - # Reduce/reduce conflict. In this case, we favor the rule - # that was defined first in the grammar file - oldp = Productions[-r] - pp = Productions[p.number] - if oldp.line > pp.line: - st_action[a] = -p.number - st_actionp[a] = p - chosenp, rejectp = pp, oldp - Productions[p.number].reduced += 1 - Productions[oldp.number].reduced -= 1 - else: - chosenp, rejectp = oldp, pp - self.rr_conflicts.append((st, chosenp, rejectp)) - log.info(' ! reduce/reduce conflict for %s resolved using rule %d (%s)', - a, st_actionp[a].number, st_actionp[a]) - else: - raise LALRError('Unknown conflict in state %d' % st) - else: - st_action[a] = -p.number - st_actionp[a] = p - Productions[p.number].reduced += 1 - else: - i = p.lr_index - a = p.prod[i+1] # Get symbol right after the "." - if a in self.grammar.Terminals: - g = self.lr0_goto(I, a) - j = self.lr0_cidhash.get(id(g), -1) - if j >= 0: - # We are in a shift state - actlist.append((a, p, 'shift and go to state %d' % j)) - r = st_action.get(a) - if r is not None: - # Whoa have a shift/reduce or shift/shift conflict - if r > 0: - if r != j: - raise LALRError('Shift/shift conflict in state %d' % st) - elif r < 0: - # Do a precedence check. - # - if precedence of reduce rule is higher, we reduce. - # - if precedence of reduce is same and left assoc, we reduce. - # - otherwise we shift - - # Shift precedence comes from the token - sprec, slevel = Precedence.get(a, ('right', 0)) - - # Reduce precedence comes from the rule that could have been reduced - rprec, rlevel = Productions[st_actionp[a].number].prec - - if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')): - # We decide to shift here... highest precedence to shift - Productions[st_actionp[a].number].reduced -= 1 - st_action[a] = j - st_actionp[a] = p - if not rlevel: - log.info(' ! shift/reduce conflict for %s resolved as shift', a) - self.sr_conflicts.append((st, a, 'shift')) - elif (slevel == rlevel) and (rprec == 'nonassoc'): - st_action[a] = None - else: - # Hmmm. Guess we'll keep the reduce - if not slevel and not rlevel: - log.info(' ! shift/reduce conflict for %s resolved as reduce', a) - self.sr_conflicts.append((st, a, 'reduce')) - - else: - raise LALRError('Unknown conflict in state %d' % st) - else: - st_action[a] = j - st_actionp[a] = p - - # Print the actions associated with each terminal - _actprint = {} - for a, p, m in actlist: - if a in st_action: - if p is st_actionp[a]: - log.info(' %-15s %s', a, m) - _actprint[(a, m)] = 1 - log.info('') - # Print the actions that were not used. (debugging) - not_used = 0 - for a, p, m in actlist: - if a in st_action: - if p is not st_actionp[a]: - if not (a, m) in _actprint: - log.debug(' ! 
%-15s [ %s ]', a, m) - not_used = 1 - _actprint[(a, m)] = 1 - if not_used: - log.debug('') - - # Construct the goto table for this state - - nkeys = {} - for ii in I: - for s in ii.usyms: - if s in self.grammar.Nonterminals: - nkeys[s] = None - for n in nkeys: - g = self.lr0_goto(I, n) - j = self.lr0_cidhash.get(id(g), -1) - if j >= 0: - st_goto[n] = j - log.info(' %-30s shift and go to state %d', n, j) - - action[st] = st_action - actionp[st] = st_actionp - goto[st] = st_goto - st += 1 - - # ----------------------------------------------------------------------------- - # write() - # - # This function writes the LR parsing tables to a file - # ----------------------------------------------------------------------------- - - def write_table(self, tabmodule, outputdir='', signature=''): - if isinstance(tabmodule, types.ModuleType): - raise IOError("Won't overwrite existing tabmodule") - - basemodulename = tabmodule.split('.')[-1] - filename = os.path.join(outputdir, basemodulename) + '.py' - try: - f = open(filename, 'w') - - f.write(''' -# %s -# This file is automatically generated. Do not edit. -# pylint: disable=W,C,R -_tabversion = %r - -_lr_method = %r - -_lr_signature = %r - ''' % (os.path.basename(filename), __tabversion__, self.lr_method, signature)) - - # Change smaller to 0 to go back to original tables - smaller = 1 - - # Factor out names to try and make smaller - if smaller: - items = {} - - for s, nd in self.lr_action.items(): - for name, v in nd.items(): - i = items.get(name) - if not i: - i = ([], []) - items[name] = i - i[0].append(s) - i[1].append(v) - - f.write('\n_lr_action_items = {') - for k, v in items.items(): - f.write('%r:([' % k) - for i in v[0]: - f.write('%r,' % i) - f.write('],[') - for i in v[1]: - f.write('%r,' % i) - - f.write(']),') - f.write('}\n') - - f.write(''' -_lr_action = {} -for _k, _v in _lr_action_items.items(): - for _x,_y in zip(_v[0],_v[1]): - if not _x in _lr_action: _lr_action[_x] = {} - _lr_action[_x][_k] = _y -del _lr_action_items -''') - - else: - f.write('\n_lr_action = { ') - for k, v in self.lr_action.items(): - f.write('(%r,%r):%r,' % (k[0], k[1], v)) - f.write('}\n') - - if smaller: - # Factor out names to try and make smaller - items = {} - - for s, nd in self.lr_goto.items(): - for name, v in nd.items(): - i = items.get(name) - if not i: - i = ([], []) - items[name] = i - i[0].append(s) - i[1].append(v) - - f.write('\n_lr_goto_items = {') - for k, v in items.items(): - f.write('%r:([' % k) - for i in v[0]: - f.write('%r,' % i) - f.write('],[') - for i in v[1]: - f.write('%r,' % i) - - f.write(']),') - f.write('}\n') - - f.write(''' -_lr_goto = {} -for _k, _v in _lr_goto_items.items(): - for _x, _y in zip(_v[0], _v[1]): - if not _x in _lr_goto: _lr_goto[_x] = {} - _lr_goto[_x][_k] = _y -del _lr_goto_items -''') - else: - f.write('\n_lr_goto = { ') - for k, v in self.lr_goto.items(): - f.write('(%r,%r):%r,' % (k[0], k[1], v)) - f.write('}\n') - - # Write production table - f.write('_lr_productions = [\n') - for p in self.lr_productions: - if p.func: - f.write(' (%r,%r,%d,%r,%r,%d),\n' % (p.str, p.name, p.len, - p.func, os.path.basename(p.file), p.line)) - else: - f.write(' (%r,%r,%d,None,None,None),\n' % (str(p), p.name, p.len)) - f.write(']\n') - f.close() - - except IOError as e: - raise - - - # ----------------------------------------------------------------------------- - # pickle_table() - # - # This function pickles the LR parsing tables to a supplied file object - # 
----------------------------------------------------------------------------- - - def pickle_table(self, filename, signature=''): - try: - import cPickle as pickle - except ImportError: - import pickle - with open(filename, 'wb') as outf: - pickle.dump(__tabversion__, outf, pickle_protocol) - pickle.dump(self.lr_method, outf, pickle_protocol) - pickle.dump(signature, outf, pickle_protocol) - pickle.dump(self.lr_action, outf, pickle_protocol) - pickle.dump(self.lr_goto, outf, pickle_protocol) - - outp = [] - for p in self.lr_productions: - if p.func: - outp.append((p.str, p.name, p.len, p.func, os.path.basename(p.file), p.line)) - else: - outp.append((str(p), p.name, p.len, None, None, None)) - pickle.dump(outp, outf, pickle_protocol) - -# ----------------------------------------------------------------------------- -# === INTROSPECTION === -# -# The following functions and classes are used to implement the PLY -# introspection features followed by the yacc() function itself. -# ----------------------------------------------------------------------------- - -# ----------------------------------------------------------------------------- -# get_caller_module_dict() -# -# This function returns a dictionary containing all of the symbols defined within -# a caller further down the call stack. This is used to get the environment -# associated with the yacc() call if none was provided. -# ----------------------------------------------------------------------------- - -def get_caller_module_dict(levels): - f = sys._getframe(levels) - ldict = f.f_globals.copy() - if f.f_globals != f.f_locals: - ldict.update(f.f_locals) - return ldict - -# ----------------------------------------------------------------------------- -# parse_grammar() -# -# This takes a raw grammar rule string and parses it into production data -# ----------------------------------------------------------------------------- -def parse_grammar(doc, file, line): - grammar = [] - # Split the doc string into lines - pstrings = doc.splitlines() - lastp = None - dline = line - for ps in pstrings: - dline += 1 - p = ps.split() - if not p: - continue - try: - if p[0] == '|': - # This is a continuation of a previous rule - if not lastp: - raise SyntaxError("%s:%d: Misplaced '|'" % (file, dline)) - prodname = lastp - syms = p[1:] - else: - prodname = p[0] - lastp = prodname - syms = p[2:] - assign = p[1] - if assign != ':' and assign != '::=': - raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file, dline)) - - grammar.append((file, dline, prodname, syms)) - except SyntaxError: - raise - except Exception: - raise SyntaxError('%s:%d: Syntax error in rule %r' % (file, dline, ps.strip())) - - return grammar - -# ----------------------------------------------------------------------------- -# ParserReflect() -# -# This class represents information extracted for building a parser including -# start symbol, error function, tokens, precedence list, action functions, -# etc. 
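-#
-# A minimal sketch of the kind of module dictionary this class inspects
-# (editor's note; the names below are hypothetical, not from the source):
-#
-#     tokens = ('NAME', 'NUMBER', 'PLUS')
-#     precedence = (('left', 'PLUS'),)
-#
-#     def p_expr_plus(p):
-#         'expr : expr PLUS expr'
-#         p[0] = p[1] + p[3]
-#
-#     def p_error(p):
-#         print('Syntax error at', p)
-#
-# get_all() harvests 'start', 'p_error', 'tokens', 'precedence', and every
-# p_* rule function from such a dictionary; validate_all() then checks them.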
-# ----------------------------------------------------------------------------- -class ParserReflect(object): - def __init__(self, pdict, log=None): - self.pdict = pdict - self.start = None - self.error_func = None - self.tokens = None - self.modules = set() - self.grammar = [] - self.error = False - - if log is None: - self.log = PlyLogger(sys.stderr) - else: - self.log = log - - # Get all of the basic information - def get_all(self): - self.get_start() - self.get_error_func() - self.get_tokens() - self.get_precedence() - self.get_pfunctions() - - # Validate all of the information - def validate_all(self): - self.validate_start() - self.validate_error_func() - self.validate_tokens() - self.validate_precedence() - self.validate_pfunctions() - self.validate_modules() - return self.error - - # Compute a signature over the grammar - def signature(self): - parts = [] - try: - if self.start: - parts.append(self.start) - if self.prec: - parts.append(''.join([''.join(p) for p in self.prec])) - if self.tokens: - parts.append(' '.join(self.tokens)) - for f in self.pfuncs: - if f[3]: - parts.append(f[3]) - except (TypeError, ValueError): - pass - return ''.join(parts) - - # ----------------------------------------------------------------------------- - # validate_modules() - # - # This method checks to see if there are duplicated p_rulename() functions - # in the parser module file. Without this function, it is really easy for - # users to make mistakes by cutting and pasting code fragments (and it's a real - # bugger to try and figure out why the resulting parser doesn't work). Therefore, - # we just do a little regular expression pattern matching of def statements - # to try and detect duplicates. - # ----------------------------------------------------------------------------- - - def validate_modules(self): - # Match def p_funcname( - fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(') - - for module in self.modules: - try: - lines, linen = inspect.getsourcelines(module) - except IOError: - continue - - counthash = {} - for linen, line in enumerate(lines): - linen += 1 - m = fre.match(line) - if m: - name = m.group(1) - prev = counthash.get(name) - if not prev: - counthash[name] = linen - else: - filename = inspect.getsourcefile(module) - self.log.warning('%s:%d: Function %s redefined. 
Previously defined on line %d', - filename, linen, name, prev) - - # Get the start symbol - def get_start(self): - self.start = self.pdict.get('start') - - # Validate the start symbol - def validate_start(self): - if self.start is not None: - if not isinstance(self.start, string_types): - self.log.error("'start' must be a string") - - # Look for error handler - def get_error_func(self): - self.error_func = self.pdict.get('p_error') - - # Validate the error function - def validate_error_func(self): - if self.error_func: - if isinstance(self.error_func, types.FunctionType): - ismethod = 0 - elif isinstance(self.error_func, types.MethodType): - ismethod = 1 - else: - self.log.error("'p_error' defined, but is not a function or method") - self.error = True - return - - eline = self.error_func.__code__.co_firstlineno - efile = self.error_func.__code__.co_filename - module = inspect.getmodule(self.error_func) - self.modules.add(module) - - argcount = self.error_func.__code__.co_argcount - ismethod - if argcount != 1: - self.log.error('%s:%d: p_error() requires 1 argument', efile, eline) - self.error = True - - # Get the tokens map - def get_tokens(self): - tokens = self.pdict.get('tokens') - if not tokens: - self.log.error('No token list is defined') - self.error = True - return - - if not isinstance(tokens, (list, tuple)): - self.log.error('tokens must be a list or tuple') - self.error = True - return - - if not tokens: - self.log.error('tokens is empty') - self.error = True - return - - self.tokens = sorted(tokens) - - # Validate the tokens - def validate_tokens(self): - # Validate the tokens. - if 'error' in self.tokens: - self.log.error("Illegal token name 'error'. Is a reserved word") - self.error = True - return - - terminals = set() - for n in self.tokens: - if n in terminals: - self.log.warning('Token %r multiply defined', n) - terminals.add(n) - - # Get the precedence map (if any) - def get_precedence(self): - self.prec = self.pdict.get('precedence') - - # Validate and parse the precedence map - def validate_precedence(self): - preclist = [] - if self.prec: - if not isinstance(self.prec, (list, tuple)): - self.log.error('precedence must be a list or tuple') - self.error = True - return - for level, p in enumerate(self.prec): - if not isinstance(p, (list, tuple)): - self.log.error('Bad precedence table') - self.error = True - return - - if len(p) < 2: - self.log.error('Malformed precedence entry %s. 
Must be (assoc, term, ..., term)', p) - self.error = True - return - assoc = p[0] - if not isinstance(assoc, string_types): - self.log.error('precedence associativity must be a string') - self.error = True - return - for term in p[1:]: - if not isinstance(term, string_types): - self.log.error('precedence items must be strings') - self.error = True - return - preclist.append((term, assoc, level+1)) - self.preclist = preclist - - # Get all p_functions from the grammar - def get_pfunctions(self): - p_functions = [] - for name, item in self.pdict.items(): - if not name.startswith('p_') or name == 'p_error': - continue - if isinstance(item, (types.FunctionType, types.MethodType)): - line = getattr(item, 'co_firstlineno', item.__code__.co_firstlineno) - module = inspect.getmodule(item) - p_functions.append((line, module, name, item.__doc__)) - - # Sort all of the actions by line number; make sure to stringify - # modules to make them sortable, since `line` may not uniquely sort all - # p functions - p_functions.sort(key=lambda p_function: ( - p_function[0], - str(p_function[1]), - p_function[2], - p_function[3])) - self.pfuncs = p_functions - - # Validate all of the p_functions - def validate_pfunctions(self): - grammar = [] - # Check for non-empty symbols - if len(self.pfuncs) == 0: - self.log.error('no rules of the form p_rulename are defined') - self.error = True - return - - for line, module, name, doc in self.pfuncs: - file = inspect.getsourcefile(module) - func = self.pdict[name] - if isinstance(func, types.MethodType): - reqargs = 2 - else: - reqargs = 1 - if func.__code__.co_argcount > reqargs: - self.log.error('%s:%d: Rule %r has too many arguments', file, line, func.__name__) - self.error = True - elif func.__code__.co_argcount < reqargs: - self.log.error('%s:%d: Rule %r requires an argument', file, line, func.__name__) - self.error = True - elif not func.__doc__: - self.log.warning('%s:%d: No documentation string specified in function %r (ignored)', - file, line, func.__name__) - else: - try: - parsed_g = parse_grammar(doc, file, line) - for g in parsed_g: - grammar.append((name, g)) - except SyntaxError as e: - self.log.error(str(e)) - self.error = True - - # Looks like a valid grammar rule - # Mark the file in which defined. - self.modules.add(module) - - # Secondary validation step that looks for p_ definitions that are not functions - # or functions that look like they might be grammar rules. 
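-        # (Editor's sketch of the kind of mistake caught below; the name is
-        # hypothetical. A rule written as
-        #
-        #     def expr_plus(p):
-        #         'expr : expr PLUS term'
-        #
-        # takes one argument and carries a grammar-style docstring, but lacks
-        # the p_ prefix, so it would be silently ignored without the warning
-        # emitted below.)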
-
-        for n, v in self.pdict.items():
-            if n.startswith('p_') and isinstance(v, (types.FunctionType, types.MethodType)):
-                continue
-            if n.startswith('t_'):
-                continue
-            if n.startswith('p_') and n != 'p_error':
-                self.log.warning('%r not defined as a function', n)
-            if ((isinstance(v, types.FunctionType) and v.__code__.co_argcount == 1) or
-                    (isinstance(v, types.MethodType) and v.__func__.__code__.co_argcount == 2)):
-                if v.__doc__:
-                    try:
-                        doc = v.__doc__.split(' ')
-                        if doc[1] == ':':
-                            self.log.warning('%s:%d: Possible grammar rule %r defined without p_ prefix',
-                                             v.__code__.co_filename, v.__code__.co_firstlineno, n)
-                    except IndexError:
-                        pass
-
-        self.grammar = grammar
-
-# -----------------------------------------------------------------------------
-# yacc(module)
-#
-# Build a parser
-# -----------------------------------------------------------------------------
-
-def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
-         check_recursion=True, optimize=False, write_tables=True, debugfile=debug_file,
-         outputdir=None, debuglog=None, errorlog=None, picklefile=None):
-
-    if tabmodule is None:
-        tabmodule = tab_module
-
-    # Reference to the parsing method of the last built parser
-    global parse
-
-    # If pickling is enabled, table files are not created
-    if picklefile:
-        write_tables = 0
-
-    if errorlog is None:
-        errorlog = PlyLogger(sys.stderr)
-
-    # Get the module dictionary used for the parser
-    if module:
-        _items = [(k, getattr(module, k)) for k in dir(module)]
-        pdict = dict(_items)
-        # If no __file__ or __package__ attributes are available, try to obtain them
-        # from the __module__ instead
-        if '__file__' not in pdict:
-            pdict['__file__'] = sys.modules[pdict['__module__']].__file__
-        if '__package__' not in pdict and '__module__' in pdict:
-            if hasattr(sys.modules[pdict['__module__']], '__package__'):
-                pdict['__package__'] = sys.modules[pdict['__module__']].__package__
-    else:
-        pdict = get_caller_module_dict(2)
-
-    if outputdir is None:
-        # If no output directory is set, the location of the output files
-        # is determined according to the following rules:
-        #   - If tabmodule specifies a package, files go into that package directory
-        #   - Otherwise, files go in the same directory as the specifying module
-        if isinstance(tabmodule, types.ModuleType):
-            srcfile = tabmodule.__file__
-        else:
-            if '.' not in tabmodule:
-                srcfile = pdict['__file__']
-            else:
-                parts = tabmodule.split('.')
-                pkgname = '.'.join(parts[:-1])
-                exec('import %s' % pkgname)
-                srcfile = getattr(sys.modules[pkgname], '__file__', '')
-        outputdir = os.path.dirname(srcfile)
-
-    # Determine if the module is part of a package.
-    # If so, fix the tabmodule setting so that the tables load correctly
-    pkg = pdict.get('__package__')
-    if pkg and isinstance(tabmodule, str):
-        if '.' not in tabmodule:
-            tabmodule = pkg + '.'
+ tabmodule - - - - # Set start symbol if it's specified directly using an argument - if start is not None: - pdict['start'] = start - - # Collect parser information from the dictionary - pinfo = ParserReflect(pdict, log=errorlog) - pinfo.get_all() - - if pinfo.error: - raise YaccError('Unable to build parser') - - # Check signature against table files (if any) - signature = pinfo.signature() - - # Read the tables - try: - lr = LRTable() - if picklefile: - read_signature = lr.read_pickle(picklefile) - else: - read_signature = lr.read_table(tabmodule) - if optimize or (read_signature == signature): - try: - lr.bind_callables(pinfo.pdict) - parser = LRParser(lr, pinfo.error_func) - parse = parser.parse - return parser - except Exception as e: - errorlog.warning('There was a problem loading the table file: %r', e) - except VersionError as e: - errorlog.warning(str(e)) - except ImportError: - pass - - if debuglog is None: - if debug: - try: - debuglog = PlyLogger(open(os.path.join(outputdir, debugfile), 'w')) - except IOError as e: - errorlog.warning("Couldn't open %r. %s" % (debugfile, e)) - debuglog = NullLogger() - else: - debuglog = NullLogger() - - debuglog.info('Created by PLY version %s (http://www.dabeaz.com/ply)', __version__) - - errors = False - - # Validate the parser information - if pinfo.validate_all(): - raise YaccError('Unable to build parser') - - if not pinfo.error_func: - errorlog.warning('no p_error() function is defined') - - # Create a grammar object - grammar = Grammar(pinfo.tokens) - - # Set precedence level for terminals - for term, assoc, level in pinfo.preclist: - try: - grammar.set_precedence(term, assoc, level) - except GrammarError as e: - errorlog.warning('%s', e) - - # Add productions to the grammar - for funcname, gram in pinfo.grammar: - file, line, prodname, syms = gram - try: - grammar.add_production(prodname, syms, funcname, file, line) - except GrammarError as e: - errorlog.error('%s', e) - errors = True - - # Set the grammar start symbols - try: - if start is None: - grammar.set_start(pinfo.start) - else: - grammar.set_start(start) - except GrammarError as e: - errorlog.error(str(e)) - errors = True - - if errors: - raise YaccError('Unable to build parser') - - # Verify the grammar structure - undefined_symbols = grammar.undefined_symbols() - for sym, prod in undefined_symbols: - errorlog.error('%s:%d: Symbol %r used, but not defined as a token or a rule', prod.file, prod.line, sym) - errors = True - - unused_terminals = grammar.unused_terminals() - if unused_terminals: - debuglog.info('') - debuglog.info('Unused terminals:') - debuglog.info('') - for term in unused_terminals: - errorlog.warning('Token %r defined, but not used', term) - debuglog.info(' %s', term) - - # Print out all productions to the debug log - if debug: - debuglog.info('') - debuglog.info('Grammar') - debuglog.info('') - for n, p in enumerate(grammar.Productions): - debuglog.info('Rule %-5d %s', n, p) - - # Find unused non-terminals - unused_rules = grammar.unused_rules() - for prod in unused_rules: - errorlog.warning('%s:%d: Rule %r defined, but not used', prod.file, prod.line, prod.name) - - if len(unused_terminals) == 1: - errorlog.warning('There is 1 unused token') - if len(unused_terminals) > 1: - errorlog.warning('There are %d unused tokens', len(unused_terminals)) - - if len(unused_rules) == 1: - errorlog.warning('There is 1 unused rule') - if len(unused_rules) > 1: - errorlog.warning('There are %d unused rules', len(unused_rules)) - - if debug: - debuglog.info('') - 
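- # (Editor's note: the loops below dump a cross-reference to the debug log --
- # each terminal and nonterminal followed by the numbers of the rules in
- # which it appears.)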
debuglog.info('Terminals, with rules where they appear') - debuglog.info('') - terms = list(grammar.Terminals) - terms.sort() - for term in terms: - debuglog.info('%-20s : %s', term, ' '.join([str(s) for s in grammar.Terminals[term]])) - - debuglog.info('') - debuglog.info('Nonterminals, with rules where they appear') - debuglog.info('') - nonterms = list(grammar.Nonterminals) - nonterms.sort() - for nonterm in nonterms: - debuglog.info('%-20s : %s', nonterm, ' '.join([str(s) for s in grammar.Nonterminals[nonterm]])) - debuglog.info('') - - if check_recursion: - unreachable = grammar.find_unreachable() - for u in unreachable: - errorlog.warning('Symbol %r is unreachable', u) - - infinite = grammar.infinite_cycles() - for inf in infinite: - errorlog.error('Infinite recursion detected for symbol %r', inf) - errors = True - - unused_prec = grammar.unused_precedence() - for term, assoc in unused_prec: - errorlog.error('Precedence rule %r defined for unknown symbol %r', assoc, term) - errors = True - - if errors: - raise YaccError('Unable to build parser') - - # Run the LRGeneratedTable on the grammar - if debug: - errorlog.debug('Generating %s tables', method) - - lr = LRGeneratedTable(grammar, method, debuglog) - - if debug: - num_sr = len(lr.sr_conflicts) - - # Report shift/reduce and reduce/reduce conflicts - if num_sr == 1: - errorlog.warning('1 shift/reduce conflict') - elif num_sr > 1: - errorlog.warning('%d shift/reduce conflicts', num_sr) - - num_rr = len(lr.rr_conflicts) - if num_rr == 1: - errorlog.warning('1 reduce/reduce conflict') - elif num_rr > 1: - errorlog.warning('%d reduce/reduce conflicts', num_rr) - - # Write out conflicts to the output file - if debug and (lr.sr_conflicts or lr.rr_conflicts): - debuglog.warning('') - debuglog.warning('Conflicts:') - debuglog.warning('') - - for state, tok, resolution in lr.sr_conflicts: - debuglog.warning('shift/reduce conflict for %s in state %d resolved as %s', tok, state, resolution) - - already_reported = set() - for state, rule, rejected in lr.rr_conflicts: - if (state, id(rule), id(rejected)) in already_reported: - continue - debuglog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule) - debuglog.warning('rejected rule (%s) in state %d', rejected, state) - errorlog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule) - errorlog.warning('rejected rule (%s) in state %d', rejected, state) - already_reported.add((state, id(rule), id(rejected))) - - warned_never = [] - for state, rule, rejected in lr.rr_conflicts: - if not rejected.reduced and (rejected not in warned_never): - debuglog.warning('Rule (%s) is never reduced', rejected) - errorlog.warning('Rule (%s) is never reduced', rejected) - warned_never.append(rejected) - - # Write the table file if requested - if write_tables: - try: - lr.write_table(tabmodule, outputdir, signature) - if tabmodule in sys.modules: - del sys.modules[tabmodule] - except IOError as e: - errorlog.warning("Couldn't create %r. %s" % (tabmodule, e)) - - # Write a pickled version of the tables - if picklefile: - try: - lr.pickle_table(picklefile, signature) - except IOError as e: - errorlog.warning("Couldn't create %r. 
%s" % (picklefile, e)) - - # Build the parser - lr.bind_callables(pinfo.pdict) - parser = LRParser(lr, pinfo.error_func) - - parse = parser.parse - return parser diff --git a/xonsh/ply/ply/ygen.py b/xonsh/ply/ply/ygen.py deleted file mode 100644 index 03b9318..0000000 --- a/xonsh/ply/ply/ygen.py +++ /dev/null @@ -1,69 +0,0 @@ -# ply: ygen.py -# -# This is a support program that auto-generates different versions of the YACC parsing -# function with different features removed for the purposes of performance. -# -# Users should edit the method LRParser.parsedebug() in yacc.py. The source code -# for that method is then used to create the other methods. See the comments in -# yacc.py for further details. - -import os.path -import shutil - -def get_source_range(lines, tag): - srclines = enumerate(lines) - start_tag = '#--! %s-start' % tag - end_tag = '#--! %s-end' % tag - - for start_index, line in srclines: - if line.strip().startswith(start_tag): - break - - for end_index, line in srclines: - if line.strip().endswith(end_tag): - break - - return (start_index + 1, end_index) - -def filter_section(lines, tag): - filtered_lines = [] - include = True - tag_text = '#--! %s' % tag - for line in lines: - if line.strip().startswith(tag_text): - include = not include - elif include: - filtered_lines.append(line) - return filtered_lines - -def main(): - dirname = os.path.dirname(__file__) - shutil.copy2(os.path.join(dirname, 'yacc.py'), os.path.join(dirname, 'yacc.py.bak')) - with open(os.path.join(dirname, 'yacc.py'), 'r') as f: - lines = f.readlines() - - parse_start, parse_end = get_source_range(lines, 'parsedebug') - parseopt_start, parseopt_end = get_source_range(lines, 'parseopt') - parseopt_notrack_start, parseopt_notrack_end = get_source_range(lines, 'parseopt-notrack') - - # Get the original source - orig_lines = lines[parse_start:parse_end] - - # Filter the DEBUG sections out - parseopt_lines = filter_section(orig_lines, 'DEBUG') - - # Filter the TRACKING sections out - parseopt_notrack_lines = filter_section(parseopt_lines, 'TRACKING') - - # Replace the parser source sections with updated versions - lines[parseopt_notrack_start:parseopt_notrack_end] = parseopt_notrack_lines - lines[parseopt_start:parseopt_end] = parseopt_lines - - lines = [line.rstrip()+'\n' for line in lines] - with open(os.path.join(dirname, 'yacc.py'), 'w') as f: - f.writelines(lines) - - print('Updated yacc.py') - -if __name__ == '__main__': - main() diff --git a/xonsh/ply/setup.md b/xonsh/ply/setup.md deleted file mode 100644 index 967874a..0000000 --- a/xonsh/ply/setup.md +++ /dev/null @@ -1,40 +0,0 @@ -# Maintained, No Package Releases - -PLY is maintained software, but no longer produces package releases. -There is no `setup.py` file. It is not something that you install -with `pip` or a similar tool. You must COPY the necessary code from -PLY into your project and take ownership of it. - -Why this policy? PLY is a highly specialized tool for expert-level -programmers who are writing parsers and compilers. If you are writing -a compiler, there's a good chance that it's part of a substantially -larger project. Managing external dependencies (such as PLY) in such -projects is an ongoing challenge. However, the truth of the matter is -that PLY just isn't that big. All of the core functionality is -contained in just two files. PLY has no external dependencies of its -own. It changes very rarely. Plus, there are various customizations -that you might want to apply to how it works. 
So, all things equal, -it's probably better for you to copy it. - -But what about getting all of the latest improvements and bug fixes? -What improvements? PLY is implementing a 1970s-era parsing algorithm. -It's not cutting edge. As for bug fixes, you'll know pretty rapidly -if PLY works for your project or not. If it's working, there's -literally no reason to ever upgrade it. Keep using the version of code -that you copied. If you think you've found a bug, check back with the -repository to see if it's been fixed. Or submit it as an issue so that -it can be looked at. - - - - - - - - - - - - - - diff --git a/xonsh/ply/test/README b/xonsh/ply/test/README deleted file mode 100644 index 03b167c..0000000 --- a/xonsh/ply/test/README +++ /dev/null @@ -1,8 +0,0 @@ -This directory mostly contains tests for various types of error -conditions. To run: - - $ python testlex.py - $ python testyacc.py - $ python testcpp.py - -The script 'cleanup.sh' cleans up this directory to its original state. diff --git a/xonsh/ply/test/calclex.py b/xonsh/ply/test/calclex.py deleted file mode 100644 index 030a986..0000000 --- a/xonsh/ply/test/calclex.py +++ /dev/null @@ -1,49 +0,0 @@ -# ----------------------------------------------------------------------------- -# calclex.py -# ----------------------------------------------------------------------------- -import sys - -if ".." not in sys.path: sys.path.insert(0,"..") -import ply.lex as lex - -tokens = ( - 'NAME','NUMBER', - 'PLUS','MINUS','TIMES','DIVIDE','EQUALS', - 'LPAREN','RPAREN', - ) - -# Tokens - -t_PLUS = r'\+' -t_MINUS = r'-' -t_TIMES = r'\*' -t_DIVIDE = r'/' -t_EQUALS = r'=' -t_LPAREN = r'\(' -t_RPAREN = r'\)' -t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*' - -def t_NUMBER(t): - r'\d+' - try: - t.value = int(t.value) - except ValueError: - print("Integer value too large %s" % t.value) - t.value = 0 - return t - -t_ignore = " \t" - -def t_newline(t): - r'\n+' - t.lexer.lineno += t.value.count("\n") - -def t_error(t): - print("Illegal character '%s'" % t.value[0]) - t.lexer.skip(1) - -# Build the lexer -lexer = lex.lex() - - - diff --git a/xonsh/ply/test/cleanup.sh b/xonsh/ply/test/cleanup.sh deleted file mode 100755 index 9374f2c..0000000 --- a/xonsh/ply/test/cleanup.sh +++ /dev/null @@ -1,4 +0,0 @@ -#!/bin/sh - -rm -rf *~ *.pyc *.pyo *.dif *.out __pycache__ - diff --git a/xonsh/ply/test/lex_closure.py b/xonsh/ply/test/lex_closure.py deleted file mode 100644 index 30ee679..0000000 --- a/xonsh/ply/test/lex_closure.py +++ /dev/null @@ -1,54 +0,0 @@ -# ----------------------------------------------------------------------------- -# lex_closure.py -# ----------------------------------------------------------------------------- -import sys - -if ".." 
not in sys.path: sys.path.insert(0,"..") -import ply.lex as lex - -tokens = ( - 'NAME','NUMBER', - 'PLUS','MINUS','TIMES','DIVIDE','EQUALS', - 'LPAREN','RPAREN', - ) - -def make_calc(): - - # Tokens - - t_PLUS = r'\+' - t_MINUS = r'-' - t_TIMES = r'\*' - t_DIVIDE = r'/' - t_EQUALS = r'=' - t_LPAREN = r'\(' - t_RPAREN = r'\)' - t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*' - - def t_NUMBER(t): - r'\d+' - try: - t.value = int(t.value) - except ValueError: - print("Integer value too large %s" % t.value) - t.value = 0 - return t - - t_ignore = " \t" - - def t_newline(t): - r'\n+' - t.lineno += t.value.count("\n") - - def t_error(t): - print("Illegal character '%s'" % t.value[0]) - t.lexer.skip(1) - - # Build the lexer - return lex.lex() - -make_calc() -lex.runmain(data="3+4") - - - diff --git a/xonsh/ply/test/lex_doc1.py b/xonsh/ply/test/lex_doc1.py deleted file mode 100644 index 8a2bfcc..0000000 --- a/xonsh/ply/test/lex_doc1.py +++ /dev/null @@ -1,26 +0,0 @@ -# lex_doc1.py -# -# Missing documentation string - -import sys -if ".." not in sys.path: sys.path.insert(0,"..") - -import ply.lex as lex - -tokens = [ - "PLUS", - "MINUS", - "NUMBER", - ] - -t_PLUS = r'\+' -t_MINUS = r'-' -def t_NUMBER(t): - pass - -def t_error(t): - pass - -lex.lex() - - diff --git a/xonsh/ply/test/lex_dup1.py b/xonsh/ply/test/lex_dup1.py deleted file mode 100644 index fd04cdb..0000000 --- a/xonsh/ply/test/lex_dup1.py +++ /dev/null @@ -1,29 +0,0 @@ -# lex_dup1.py -# -# Duplicated rule specifiers - -import sys -if ".." not in sys.path: sys.path.insert(0,"..") - -import ply.lex as lex - -tokens = [ - "PLUS", - "MINUS", - "NUMBER", - ] - -t_PLUS = r'\+' -t_MINUS = r'-' -t_NUMBER = r'\d+' - -t_NUMBER = r'\d+' - -def t_error(t): - pass - - - -lex.lex() - - diff --git a/xonsh/ply/test/lex_dup2.py b/xonsh/ply/test/lex_dup2.py deleted file mode 100644 index 870e5e7..0000000 --- a/xonsh/ply/test/lex_dup2.py +++ /dev/null @@ -1,33 +0,0 @@ -# lex_dup2.py -# -# Duplicated rule specifiers - -import sys -if ".." not in sys.path: sys.path.insert(0,"..") - -import ply.lex as lex - -tokens = [ - "PLUS", - "MINUS", - "NUMBER", - ] - -t_PLUS = r'\+' -t_MINUS = r'-' -def t_NUMBER(t): - r'\d+' - pass - -def t_NUMBER(t): - r'\d+' - pass - -def t_error(t): - pass - - - -lex.lex() - - diff --git a/xonsh/ply/test/lex_dup3.py b/xonsh/ply/test/lex_dup3.py deleted file mode 100644 index 94b5592..0000000 --- a/xonsh/ply/test/lex_dup3.py +++ /dev/null @@ -1,31 +0,0 @@ -# lex_dup3.py -# -# Duplicated rule specifiers - -import sys -if ".." not in sys.path: sys.path.insert(0,"..") - -import ply.lex as lex - -tokens = [ - "PLUS", - "MINUS", - "NUMBER", - ] - -t_PLUS = r'\+' -t_MINUS = r'-' -t_NUMBER = r'\d+' - -def t_NUMBER(t): - r'\d+' - pass - -def t_error(t): - pass - - - -lex.lex() - - diff --git a/xonsh/ply/test/lex_empty.py b/xonsh/ply/test/lex_empty.py deleted file mode 100644 index e0368bf..0000000 --- a/xonsh/ply/test/lex_empty.py +++ /dev/null @@ -1,20 +0,0 @@ -# lex_empty.py -# -# No rules defined - -import sys -if ".." not in sys.path: sys.path.insert(0,"..") - -import ply.lex as lex - -tokens = [ - "PLUS", - "MINUS", - "NUMBER", - ] - - - -lex.lex() - - diff --git a/xonsh/ply/test/lex_error1.py b/xonsh/ply/test/lex_error1.py deleted file mode 100644 index 4508a80..0000000 --- a/xonsh/ply/test/lex_error1.py +++ /dev/null @@ -1,24 +0,0 @@ -# lex_error1.py -# -# Missing t_error() rule - -import sys -if ".." 
not in sys.path: sys.path.insert(0,"..")
-
-import ply.lex as lex
-
-tokens = [
-    "PLUS",
-    "MINUS",
-    "NUMBER",
-    ]
-
-t_PLUS = r'\+'
-t_MINUS = r'-'
-t_NUMBER = r'\d+'
-
-
-
-lex.lex()
-
-
diff --git a/xonsh/ply/test/lex_error2.py b/xonsh/ply/test/lex_error2.py
deleted file mode 100644
index 8040d39..0000000
--- a/xonsh/ply/test/lex_error2.py
+++ /dev/null
@@ -1,26 +0,0 @@
-# lex_error2.py
-#
-# t_error defined, but not function
-
-import sys
-if ".." not in sys.path: sys.path.insert(0,"..")
-
-import ply.lex as lex
-
-tokens = [
-    "PLUS",
-    "MINUS",
-    "NUMBER",
-    ]
-
-t_PLUS = r'\+'
-t_MINUS = r'-'
-t_NUMBER = r'\d+'
-
-t_error = "foo"
-
-
-
-lex.lex()
-
-
diff --git a/xonsh/ply/test/lex_error3.py b/xonsh/ply/test/lex_error3.py
deleted file mode 100644
index 1feefb6..0000000
--- a/xonsh/ply/test/lex_error3.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# lex_error3.py
-#
-# t_error defined as function, but with wrong # args
-
-import sys
-if ".." not in sys.path: sys.path.insert(0,"..")
-
-import ply.lex as lex
-
-tokens = [
-    "PLUS",
-    "MINUS",
-    "NUMBER",
-    ]
-
-t_PLUS = r'\+'
-t_MINUS = r'-'
-t_NUMBER = r'\d+'
-
-def t_error():
-    pass
-
-
-
-lex.lex()
-
-
diff --git a/xonsh/ply/test/lex_error4.py b/xonsh/ply/test/lex_error4.py
deleted file mode 100644
index f4f48db..0000000
--- a/xonsh/ply/test/lex_error4.py
+++ /dev/null
@@ -1,27 +0,0 @@
-# lex_error4.py
-#
-# t_error defined as function, but too many args
-
-import sys
-if ".." not in sys.path: sys.path.insert(0,"..")
-
-import ply.lex as lex
-
-tokens = [
-    "PLUS",
-    "MINUS",
-    "NUMBER",
-    ]
-
-t_PLUS = r'\+'
-t_MINUS = r'-'
-t_NUMBER = r'\d+'
-
-def t_error(t,s):
-    pass
-
-
-
-lex.lex()
-
-
diff --git a/xonsh/ply/test/lex_hedit.py b/xonsh/ply/test/lex_hedit.py
deleted file mode 100644
index 34f15a1..0000000
--- a/xonsh/ply/test/lex_hedit.py
+++ /dev/null
@@ -1,47 +0,0 @@
-# -----------------------------------------------------------------------------
-# hedit.py
-#
-# Parsing of Fortran H Edit descriptions (Contributed by Pearu Peterson)
-#
-# These tokens can't be easily tokenized because they are of the following
-# form:
-#
-#   nHc1...cn
-#
-# where n is a positive integer and c1 ... cn are characters.
-#
-# This example shows how to modify the state of the lexer to parse
-# such tokens
-# -----------------------------------------------------------------------------
-import sys
-if ".." not in sys.path: sys.path.insert(0,"..")
-
-import ply.lex as lex
-
-tokens = (
-    'H_EDIT_DESCRIPTOR',
-    )
-
-# Tokens
-t_ignore = " \t\n"
-
-def t_H_EDIT_DESCRIPTOR(t):
-    r"\d+H.*"      # This grabs all of the remaining text
-    i = t.value.index('H')
-    n = eval(t.value[:i])
-
-    # Adjust the tokenizing position
-    t.lexer.lexpos -= len(t.value) - (i+1+n)
-    t.value = t.value[i+1:i+1+n]
-    return t
-
-def t_error(t):
-    print("Illegal character '%s'" % t.value[0])
-    t.lexer.skip(1)
-
-# Build the lexer
-lex.lex()
-lex.runmain(data="3Habc 10Habcdefghij 2Hxy")
-
-
-
diff --git a/xonsh/ply/test/lex_ignore.py b/xonsh/ply/test/lex_ignore.py
deleted file mode 100644
index 6c43b4c..0000000
--- a/xonsh/ply/test/lex_ignore.py
+++ /dev/null
@@ -1,31 +0,0 @@
-# lex_ignore.py
-#
-# Improperly specified ignore declaration
-
-import sys
-if ".."
not in sys.path: sys.path.insert(0,"..") - -import ply.lex as lex - -tokens = [ - "PLUS", - "MINUS", - "NUMBER", - ] - -t_PLUS = r'\+' -t_MINUS = r'-' -t_NUMBER = r'\d+' - -def t_ignore(t): - ' \t' - pass - -def t_error(t): - pass - -import sys - -lex.lex() - - diff --git a/xonsh/ply/test/lex_ignore2.py b/xonsh/ply/test/lex_ignore2.py deleted file mode 100644 index f60987a..0000000 --- a/xonsh/ply/test/lex_ignore2.py +++ /dev/null @@ -1,29 +0,0 @@ -# lex_ignore2.py -# -# ignore declaration as a raw string - -import sys -if ".." not in sys.path: sys.path.insert(0,"..") - -import ply.lex as lex - -tokens = [ - "PLUS", - "MINUS", - "NUMBER", - ] - -t_PLUS = r'\+' -t_MINUS = r'-' -t_NUMBER = r'\d+' - -t_ignore = r' \t' - -def t_error(t): - pass - - - -lex.lex() - - diff --git a/xonsh/ply/test/lex_literal1.py b/xonsh/ply/test/lex_literal1.py deleted file mode 100644 index db389c3..0000000 --- a/xonsh/ply/test/lex_literal1.py +++ /dev/null @@ -1,25 +0,0 @@ -# lex_literal1.py -# -# Bad literal specification - -import sys -if ".." not in sys.path: sys.path.insert(0,"..") - -import ply.lex as lex - -tokens = [ - "NUMBER", - ] - -literals = ["+","-","**"] - -def t_NUMBER(t): - r'\d+' - return t - -def t_error(t): - pass - -lex.lex() - - diff --git a/xonsh/ply/test/lex_literal2.py b/xonsh/ply/test/lex_literal2.py deleted file mode 100644 index b50b92c..0000000 --- a/xonsh/ply/test/lex_literal2.py +++ /dev/null @@ -1,25 +0,0 @@ -# lex_literal2.py -# -# Bad literal specification - -import sys -if ".." not in sys.path: sys.path.insert(0,"..") - -import ply.lex as lex - -tokens = [ - "NUMBER", - ] - -literals = 23 - -def t_NUMBER(t): - r'\d+' - return t - -def t_error(t): - pass - -lex.lex() - - diff --git a/xonsh/ply/test/lex_literal3.py b/xonsh/ply/test/lex_literal3.py deleted file mode 100644 index 91ab980..0000000 --- a/xonsh/ply/test/lex_literal3.py +++ /dev/null @@ -1,26 +0,0 @@ -# lex_literal3.py -# -# An empty literal specification given as a list -# Issue 8 : Literals empty list causes IndexError - -import sys -if ".." not in sys.path: sys.path.insert(0,"..") - -import ply.lex as lex - -tokens = [ - "NUMBER", - ] - -literals = [] - -def t_NUMBER(t): - r'\d+' - return t - -def t_error(t): - pass - -lex.lex() - - diff --git a/xonsh/ply/test/lex_many_tokens.py b/xonsh/ply/test/lex_many_tokens.py deleted file mode 100644 index 77ae12b..0000000 --- a/xonsh/ply/test/lex_many_tokens.py +++ /dev/null @@ -1,27 +0,0 @@ -# lex_many_tokens.py -# -# Test lex's ability to handle a large number of tokens (beyond the -# 100-group limit of the re module) - -import sys -if ".." not in sys.path: sys.path.insert(0,"..") - -import ply.lex as lex - -tokens = ["TOK%d" % i for i in range(1000)] - -for tok in tokens: - if sys.version_info[0] < 3: - exec("t_%s = '%s:'" % (tok,tok)) - else: - exec("t_%s = '%s:'" % (tok,tok), globals()) - -t_ignore = " \t" - -def t_error(t): - pass - -lex.lex(optimize=1,lextab="manytab") -lex.runmain(data="TOK34: TOK143: TOK269: TOK372: TOK452: TOK561: TOK999:") - - diff --git a/xonsh/ply/test/lex_module.py b/xonsh/ply/test/lex_module.py deleted file mode 100644 index 8bdd3ed..0000000 --- a/xonsh/ply/test/lex_module.py +++ /dev/null @@ -1,10 +0,0 @@ -# lex_module.py -# - -import sys -if ".." 
not in sys.path: sys.path.insert(0,"..") - -import ply.lex as lex -import lex_module_import -lex.lex(module=lex_module_import) -lex.runmain(data="3+4") diff --git a/xonsh/ply/test/lex_module_import.py b/xonsh/ply/test/lex_module_import.py deleted file mode 100644 index df42082..0000000 --- a/xonsh/ply/test/lex_module_import.py +++ /dev/null @@ -1,42 +0,0 @@ -# ----------------------------------------------------------------------------- -# lex_module_import.py -# -# A lexer defined in a module, but built in lex_module.py -# ----------------------------------------------------------------------------- - -tokens = ( - 'NAME','NUMBER', - 'PLUS','MINUS','TIMES','DIVIDE','EQUALS', - 'LPAREN','RPAREN', - ) - -# Tokens - -t_PLUS = r'\+' -t_MINUS = r'-' -t_TIMES = r'\*' -t_DIVIDE = r'/' -t_EQUALS = r'=' -t_LPAREN = r'\(' -t_RPAREN = r'\)' -t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*' - -def t_NUMBER(t): - r'\d+' - try: - t.value = int(t.value) - except ValueError: - print("Integer value too large %s" % t.value) - t.value = 0 - return t - -t_ignore = " \t" - -def t_newline(t): - r'\n+' - t.lineno += t.value.count("\n") - -def t_error(t): - print("Illegal character '%s'" % t.value[0]) - t.lexer.skip(1) - diff --git a/xonsh/ply/test/lex_object.py b/xonsh/ply/test/lex_object.py deleted file mode 100644 index 7e9f389..0000000 --- a/xonsh/ply/test/lex_object.py +++ /dev/null @@ -1,55 +0,0 @@ -# ----------------------------------------------------------------------------- -# lex_object.py -# ----------------------------------------------------------------------------- -import sys - -if ".." not in sys.path: sys.path.insert(0,"..") -import ply.lex as lex - -class CalcLexer: - tokens = ( - 'NAME','NUMBER', - 'PLUS','MINUS','TIMES','DIVIDE','EQUALS', - 'LPAREN','RPAREN', - ) - - # Tokens - - t_PLUS = r'\+' - t_MINUS = r'-' - t_TIMES = r'\*' - t_DIVIDE = r'/' - t_EQUALS = r'=' - t_LPAREN = r'\(' - t_RPAREN = r'\)' - t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*' - - def t_NUMBER(self,t): - r'\d+' - try: - t.value = int(t.value) - except ValueError: - print("Integer value too large %s" % t.value) - t.value = 0 - return t - - t_ignore = " \t" - - def t_newline(self,t): - r'\n+' - t.lineno += t.value.count("\n") - - def t_error(self,t): - print("Illegal character '%s'" % t.value[0]) - t.lexer.skip(1) - - -calc = CalcLexer() - -# Build the lexer -lex.lex(object=calc) -lex.runmain(data="3+4") - - - - diff --git a/xonsh/ply/test/lex_opt_alias.py b/xonsh/ply/test/lex_opt_alias.py deleted file mode 100644 index 5d5ed4c..0000000 --- a/xonsh/ply/test/lex_opt_alias.py +++ /dev/null @@ -1,54 +0,0 @@ -# ----------------------------------------------------------------------------- -# lex_opt_alias.py -# -# Tests ability to match up functions with states, aliases, and -# lexing tables. -# ----------------------------------------------------------------------------- - -import sys -if ".." 
not in sys.path: sys.path.insert(0,"..") - -tokens = ( - 'NAME','NUMBER', - ) - -states = (('instdef','inclusive'),('spam','exclusive')) - -literals = ['=','+','-','*','/', '(',')'] - -# Tokens - -def t_instdef_spam_BITS(t): - r'[01-]+' - return t - -t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*' - -def NUMBER(t): - r'\d+' - try: - t.value = int(t.value) - except ValueError: - print("Integer value too large %s" % t.value) - t.value = 0 - return t - -t_ANY_NUMBER = NUMBER - -t_ignore = " \t" -t_spam_ignore = t_ignore - -def t_newline(t): - r'\n+' - t.lexer.lineno += t.value.count("\n") - -def t_error(t): - print("Illegal character '%s'" % t.value[0]) - t.lexer.skip(1) - -t_spam_error = t_error - -# Build the lexer -import ply.lex as lex -lex.lex(optimize=1,lextab="aliastab") -lex.runmain(data="3+4") diff --git a/xonsh/ply/test/lex_optimize.py b/xonsh/ply/test/lex_optimize.py deleted file mode 100644 index 0e447e6..0000000 --- a/xonsh/ply/test/lex_optimize.py +++ /dev/null @@ -1,50 +0,0 @@ -# ----------------------------------------------------------------------------- -# lex_optimize.py -# ----------------------------------------------------------------------------- -import sys - -if ".." not in sys.path: sys.path.insert(0,"..") -import ply.lex as lex - -tokens = ( - 'NAME','NUMBER', - 'PLUS','MINUS','TIMES','DIVIDE','EQUALS', - 'LPAREN','RPAREN', - ) - -# Tokens - -t_PLUS = r'\+' -t_MINUS = r'-' -t_TIMES = r'\*' -t_DIVIDE = r'/' -t_EQUALS = r'=' -t_LPAREN = r'\(' -t_RPAREN = r'\)' -t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*' - -def t_NUMBER(t): - r'\d+' - try: - t.value = int(t.value) - except ValueError: - print("Integer value too large %s" % t.value) - t.value = 0 - return t - -t_ignore = " \t" - -def t_newline(t): - r'\n+' - t.lineno += t.value.count("\n") - -def t_error(t): - print("Illegal character '%s'" % t.value[0]) - t.lexer.skip(1) - -# Build the lexer -lex.lex(optimize=1) -lex.runmain(data="3+4") - - - diff --git a/xonsh/ply/test/lex_optimize2.py b/xonsh/ply/test/lex_optimize2.py deleted file mode 100644 index 64555f6..0000000 --- a/xonsh/ply/test/lex_optimize2.py +++ /dev/null @@ -1,50 +0,0 @@ -# ----------------------------------------------------------------------------- -# lex_optimize2.py -# ----------------------------------------------------------------------------- -import sys - -if ".." not in sys.path: sys.path.insert(0,"..") -import ply.lex as lex - -tokens = ( - 'NAME','NUMBER', - 'PLUS','MINUS','TIMES','DIVIDE','EQUALS', - 'LPAREN','RPAREN', - ) - -# Tokens - -t_PLUS = r'\+' -t_MINUS = r'-' -t_TIMES = r'\*' -t_DIVIDE = r'/' -t_EQUALS = r'=' -t_LPAREN = r'\(' -t_RPAREN = r'\)' -t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*' - -def t_NUMBER(t): - r'\d+' - try: - t.value = int(t.value) - except ValueError: - print("Integer value too large %s" % t.value) - t.value = 0 - return t - -t_ignore = " \t" - -def t_newline(t): - r'\n+' - t.lineno += t.value.count("\n") - -def t_error(t): - print("Illegal character '%s'" % t.value[0]) - t.lexer.skip(1) - -# Build the lexer -lex.lex(optimize=1,lextab="opt2tab") -lex.runmain(data="3+4") - - - diff --git a/xonsh/ply/test/lex_optimize3.py b/xonsh/ply/test/lex_optimize3.py deleted file mode 100644 index b8df5aa..0000000 --- a/xonsh/ply/test/lex_optimize3.py +++ /dev/null @@ -1,52 +0,0 @@ -# ----------------------------------------------------------------------------- -# lex_optimize3.py -# -# Writes table in a subdirectory structure. -# ----------------------------------------------------------------------------- -import sys - -if ".." 
not in sys.path: sys.path.insert(0,"..") -import ply.lex as lex - -tokens = ( - 'NAME','NUMBER', - 'PLUS','MINUS','TIMES','DIVIDE','EQUALS', - 'LPAREN','RPAREN', - ) - -# Tokens - -t_PLUS = r'\+' -t_MINUS = r'-' -t_TIMES = r'\*' -t_DIVIDE = r'/' -t_EQUALS = r'=' -t_LPAREN = r'\(' -t_RPAREN = r'\)' -t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*' - -def t_NUMBER(t): - r'\d+' - try: - t.value = int(t.value) - except ValueError: - print("Integer value too large %s" % t.value) - t.value = 0 - return t - -t_ignore = " \t" - -def t_newline(t): - r'\n+' - t.lineno += t.value.count("\n") - -def t_error(t): - print("Illegal character '%s'" % t.value[0]) - t.lexer.skip(1) - -# Build the lexer -lex.lex(optimize=1,lextab="lexdir.sub.calctab" ,outputdir="lexdir/sub") -lex.runmain(data="3+4") - - - diff --git a/xonsh/ply/test/lex_optimize4.py b/xonsh/ply/test/lex_optimize4.py deleted file mode 100644 index cc6e2a9..0000000 --- a/xonsh/ply/test/lex_optimize4.py +++ /dev/null @@ -1,26 +0,0 @@ -# ----------------------------------------------------------------------------- -# lex_optimize4.py -# ----------------------------------------------------------------------------- -import re -import sys - -if ".." not in sys.path: sys.path.insert(0,"..") -import ply.lex as lex - -tokens = [ - "PLUS", - "MINUS", - "NUMBER", - ] - -t_PLUS = r'\+?' -t_MINUS = r'-' -t_NUMBER = r'(\d+)' - -def t_error(t): - pass - - -# Build the lexer -lex.lex(optimize=True, lextab="opt4tab", reflags=re.UNICODE) -lex.runmain(data="3+4") diff --git a/xonsh/ply/test/lex_re1.py b/xonsh/ply/test/lex_re1.py deleted file mode 100644 index 5be7aef..0000000 --- a/xonsh/ply/test/lex_re1.py +++ /dev/null @@ -1,27 +0,0 @@ -# lex_re1.py -# -# Bad regular expression in a string - -import sys -if ".." not in sys.path: sys.path.insert(0,"..") - -import ply.lex as lex - -tokens = [ - "PLUS", - "MINUS", - "NUMBER", - ] - -t_PLUS = r'\+' -t_MINUS = r'-' -t_NUMBER = r'(\d+' - -def t_error(t): - pass - - - -lex.lex() - - diff --git a/xonsh/ply/test/lex_re2.py b/xonsh/ply/test/lex_re2.py deleted file mode 100644 index 8dfb8e3..0000000 --- a/xonsh/ply/test/lex_re2.py +++ /dev/null @@ -1,27 +0,0 @@ -# lex_re2.py -# -# Regular expression rule matches empty string - -import sys -if ".." not in sys.path: sys.path.insert(0,"..") - -import ply.lex as lex - -tokens = [ - "PLUS", - "MINUS", - "NUMBER", - ] - -t_PLUS = r'\+?' -t_MINUS = r'-' -t_NUMBER = r'(\d+)' - -def t_error(t): - pass - - - -lex.lex() - - diff --git a/xonsh/ply/test/lex_re3.py b/xonsh/ply/test/lex_re3.py deleted file mode 100644 index e179925..0000000 --- a/xonsh/ply/test/lex_re3.py +++ /dev/null @@ -1,29 +0,0 @@ -# lex_re3.py -# -# Regular expression rule matches empty string - -import sys -if ".." not in sys.path: sys.path.insert(0,"..") - -import ply.lex as lex - -tokens = [ - "PLUS", - "MINUS", - "NUMBER", - "POUND", - ] - -t_PLUS = r'\+' -t_MINUS = r'-' -t_NUMBER = r'(\d+)' -t_POUND = r'#' - -def t_error(t): - pass - - - -lex.lex() - - diff --git a/xonsh/ply/test/lex_rule1.py b/xonsh/ply/test/lex_rule1.py deleted file mode 100644 index 0406c6f..0000000 --- a/xonsh/ply/test/lex_rule1.py +++ /dev/null @@ -1,27 +0,0 @@ -# lex_rule1.py -# -# Rule function with incorrect number of arguments - -import sys -if ".." 
not in sys.path: sys.path.insert(0,"..") - -import ply.lex as lex - -tokens = [ - "PLUS", - "MINUS", - "NUMBER", - ] - -t_PLUS = r'\+' -t_MINUS = r'-' -t_NUMBER = 1 - -def t_error(t): - pass - - - -lex.lex() - - diff --git a/xonsh/ply/test/lex_rule2.py b/xonsh/ply/test/lex_rule2.py deleted file mode 100644 index 1c29d87..0000000 --- a/xonsh/ply/test/lex_rule2.py +++ /dev/null @@ -1,29 +0,0 @@ -# lex_rule2.py -# -# Rule function with incorrect number of arguments - -import sys -if ".." not in sys.path: sys.path.insert(0,"..") - -import ply.lex as lex - -tokens = [ - "PLUS", - "MINUS", - "NUMBER", - ] - -t_PLUS = r'\+' -t_MINUS = r'-' -def t_NUMBER(): - r'\d+' - return t - -def t_error(t): - pass - - - -lex.lex() - - diff --git a/xonsh/ply/test/lex_rule3.py b/xonsh/ply/test/lex_rule3.py deleted file mode 100644 index 9ea94da..0000000 --- a/xonsh/ply/test/lex_rule3.py +++ /dev/null @@ -1,27 +0,0 @@ -# lex_rule3.py -# -# Rule function with incorrect number of arguments - -import sys -if ".." not in sys.path: sys.path.insert(0,"..") - -import ply.lex as lex - -tokens = [ - "PLUS", - "MINUS", - "NUMBER", - ] - -t_PLUS = r'\+' -t_MINUS = r'-' -def t_NUMBER(t,s): - r'\d+' - return t - -def t_error(t): - pass - -lex.lex() - - diff --git a/xonsh/ply/test/lex_state1.py b/xonsh/ply/test/lex_state1.py deleted file mode 100644 index 7528c91..0000000 --- a/xonsh/ply/test/lex_state1.py +++ /dev/null @@ -1,40 +0,0 @@ -# lex_state1.py -# -# Bad state declaration - -import sys -if ".." not in sys.path: sys.path.insert(0,"..") - -import ply.lex as lex - -tokens = [ - "PLUS", - "MINUS", - "NUMBER", - ] - -states = 'comment' - -t_PLUS = r'\+' -t_MINUS = r'-' -t_NUMBER = r'\d+' - -# Comments -def t_comment(t): - r'/\*' - t.lexer.begin('comment') - print("Entering comment state") - -def t_comment_body_part(t): - r'(.|\n)*\*/' - print("comment body %s" % t) - t.lexer.begin('INITIAL') - -def t_error(t): - pass - - - -lex.lex() - - diff --git a/xonsh/ply/test/lex_state2.py b/xonsh/ply/test/lex_state2.py deleted file mode 100644 index 3aef69e..0000000 --- a/xonsh/ply/test/lex_state2.py +++ /dev/null @@ -1,40 +0,0 @@ -# lex_state2.py -# -# Bad state declaration - -import sys -if ".." not in sys.path: sys.path.insert(0,"..") - -import ply.lex as lex - -tokens = [ - "PLUS", - "MINUS", - "NUMBER", - ] - -states = ('comment','example') - -t_PLUS = r'\+' -t_MINUS = r'-' -t_NUMBER = r'\d+' - -# Comments -def t_comment(t): - r'/\*' - t.lexer.begin('comment') - print("Entering comment state") - -def t_comment_body_part(t): - r'(.|\n)*\*/' - print("comment body %s" % t) - t.lexer.begin('INITIAL') - -def t_error(t): - pass - - - -lex.lex() - - diff --git a/xonsh/ply/test/lex_state3.py b/xonsh/ply/test/lex_state3.py deleted file mode 100644 index 616e484..0000000 --- a/xonsh/ply/test/lex_state3.py +++ /dev/null @@ -1,42 +0,0 @@ -# lex_state3.py -# -# Bad state declaration - -import sys -if ".." 
not in sys.path: sys.path.insert(0,"..") - -import ply.lex as lex - -tokens = [ - "PLUS", - "MINUS", - "NUMBER", - ] - -comment = 1 -states = ((comment, 'inclusive'), - ('example', 'exclusive')) - -t_PLUS = r'\+' -t_MINUS = r'-' -t_NUMBER = r'\d+' - -# Comments -def t_comment(t): - r'/\*' - t.lexer.begin('comment') - print("Entering comment state") - -def t_comment_body_part(t): - r'(.|\n)*\*/' - print("comment body %s" % t) - t.lexer.begin('INITIAL') - -def t_error(t): - pass - - - -lex.lex() - - diff --git a/xonsh/ply/test/lex_state4.py b/xonsh/ply/test/lex_state4.py deleted file mode 100644 index 1825016..0000000 --- a/xonsh/ply/test/lex_state4.py +++ /dev/null @@ -1,41 +0,0 @@ -# lex_state4.py -# -# Bad state declaration - -import sys -if ".." not in sys.path: sys.path.insert(0,"..") - -import ply.lex as lex - -tokens = [ - "PLUS", - "MINUS", - "NUMBER", - ] - - -states = (('comment', 'exclsive'),) - -t_PLUS = r'\+' -t_MINUS = r'-' -t_NUMBER = r'\d+' - -# Comments -def t_comment(t): - r'/\*' - t.lexer.begin('comment') - print("Entering comment state") - -def t_comment_body_part(t): - r'(.|\n)*\*/' - print("comment body %s" % t) - t.lexer.begin('INITIAL') - -def t_error(t): - pass - - - -lex.lex() - - diff --git a/xonsh/ply/test/lex_state5.py b/xonsh/ply/test/lex_state5.py deleted file mode 100644 index 4ce828e..0000000 --- a/xonsh/ply/test/lex_state5.py +++ /dev/null @@ -1,40 +0,0 @@ -# lex_state5.py -# -# Bad state declaration - -import sys -if ".." not in sys.path: sys.path.insert(0,"..") - -import ply.lex as lex - -tokens = [ - "PLUS", - "MINUS", - "NUMBER", - ] - -states = (('comment', 'exclusive'), - ('comment', 'exclusive')) - -t_PLUS = r'\+' -t_MINUS = r'-' -t_NUMBER = r'\d+' - -# Comments -def t_comment(t): - r'/\*' - t.lexer.begin('comment') - print("Entering comment state") - -def t_comment_body_part(t): - r'(.|\n)*\*/' - print("comment body %s" % t) - t.lexer.begin('INITIAL') - -def t_error(t): - pass - - -lex.lex() - - diff --git a/xonsh/ply/test/lex_state_noerror.py b/xonsh/ply/test/lex_state_noerror.py deleted file mode 100644 index 90bbea8..0000000 --- a/xonsh/ply/test/lex_state_noerror.py +++ /dev/null @@ -1,39 +0,0 @@ -# lex_state_noerror.py -# -# Declaration of a state for which no rules are defined - -import sys -if ".." not in sys.path: sys.path.insert(0,"..") - -import ply.lex as lex - -tokens = [ - "PLUS", - "MINUS", - "NUMBER", - ] - -states = (('comment', 'exclusive'),) - -t_PLUS = r'\+' -t_MINUS = r'-' -t_NUMBER = r'\d+' - -# Comments -def t_comment(t): - r'/\*' - t.lexer.begin('comment') - print("Entering comment state") - -def t_comment_body_part(t): - r'(.|\n)*\*/' - print("comment body %s" % t) - t.lexer.begin('INITIAL') - -def t_error(t): - pass - - -lex.lex() - - diff --git a/xonsh/ply/test/lex_state_norule.py b/xonsh/ply/test/lex_state_norule.py deleted file mode 100644 index 64ec6d3..0000000 --- a/xonsh/ply/test/lex_state_norule.py +++ /dev/null @@ -1,40 +0,0 @@ -# lex_state_norule.py -# -# Declaration of a state for which no rules are defined - -import sys -if ".." 
not in sys.path: sys.path.insert(0,"..") - -import ply.lex as lex - -tokens = [ - "PLUS", - "MINUS", - "NUMBER", - ] - -states = (('comment', 'exclusive'), - ('example', 'exclusive')) - -t_PLUS = r'\+' -t_MINUS = r'-' -t_NUMBER = r'\d+' - -# Comments -def t_comment(t): - r'/\*' - t.lexer.begin('comment') - print("Entering comment state") - -def t_comment_body_part(t): - r'(.|\n)*\*/' - print("comment body %s" % t) - t.lexer.begin('INITIAL') - -def t_error(t): - pass - - -lex.lex() - - diff --git a/xonsh/ply/test/lex_state_try.py b/xonsh/ply/test/lex_state_try.py deleted file mode 100644 index fd5ba22..0000000 --- a/xonsh/ply/test/lex_state_try.py +++ /dev/null @@ -1,45 +0,0 @@ -# lex_state_try.py -# -# Declaration of a state for which no rules are defined - -import sys -if ".." not in sys.path: sys.path.insert(0,"..") - -import ply.lex as lex - -tokens = [ - "PLUS", - "MINUS", - "NUMBER", - ] - -states = (('comment', 'exclusive'),) - -t_PLUS = r'\+' -t_MINUS = r'-' -t_NUMBER = r'\d+' - -t_ignore = " \t" - -# Comments -def t_comment(t): - r'/\*' - t.lexer.begin('comment') - print("Entering comment state") - -def t_comment_body_part(t): - r'(.|\n)*\*/' - print("comment body %s" % t) - t.lexer.begin('INITIAL') - -def t_error(t): - pass - -t_comment_error = t_error -t_comment_ignore = t_ignore - -lex.lex() - -data = "3 + 4 /* This is a comment */ + 10" - -lex.runmain(data=data) diff --git a/xonsh/ply/test/lex_token1.py b/xonsh/ply/test/lex_token1.py deleted file mode 100644 index 6fca300..0000000 --- a/xonsh/ply/test/lex_token1.py +++ /dev/null @@ -1,19 +0,0 @@ -# lex_token1.py -# -# Tests for absence of tokens variable - -import sys -if ".." not in sys.path: sys.path.insert(0,"..") - -import ply.lex as lex - -t_PLUS = r'\+' -t_MINUS = r'-' -t_NUMBER = r'\d+' - -def t_error(t): - pass - -lex.lex() - - diff --git a/xonsh/ply/test/lex_token2.py b/xonsh/ply/test/lex_token2.py deleted file mode 100644 index 6e65ab0..0000000 --- a/xonsh/ply/test/lex_token2.py +++ /dev/null @@ -1,22 +0,0 @@ -# lex_token2.py -# -# Tests for tokens of wrong type - -import sys -if ".." not in sys.path: sys.path.insert(0,"..") - -import ply.lex as lex - -tokens = "PLUS MINUS NUMBER" - -t_PLUS = r'\+' -t_MINUS = r'-' -t_NUMBER = r'\d+' - -def t_error(t): - pass - - -lex.lex() - - diff --git a/xonsh/ply/test/lex_token3.py b/xonsh/ply/test/lex_token3.py deleted file mode 100644 index 636452e..0000000 --- a/xonsh/ply/test/lex_token3.py +++ /dev/null @@ -1,24 +0,0 @@ -# lex_token3.py -# -# tokens is right type, but is missing a token for one rule - -import sys -if ".." not in sys.path: sys.path.insert(0,"..") - -import ply.lex as lex - -tokens = [ - "PLUS", - "NUMBER", - ] - -t_PLUS = r'\+' -t_MINUS = r'-' -t_NUMBER = r'\d+' - -def t_error(t): - pass - -lex.lex() - - diff --git a/xonsh/ply/test/lex_token4.py b/xonsh/ply/test/lex_token4.py deleted file mode 100644 index 52947e9..0000000 --- a/xonsh/ply/test/lex_token4.py +++ /dev/null @@ -1,26 +0,0 @@ -# lex_token4.py -# -# Bad token name - -import sys -if ".." not in sys.path: sys.path.insert(0,"..") - -import ply.lex as lex - -tokens = [ - "PLUS", - "MINUS", - "-", - "NUMBER", - ] - -t_PLUS = r'\+' -t_MINUS = r'-' -t_NUMBER = r'\d+' - -def t_error(t): - pass - -lex.lex() - - diff --git a/xonsh/ply/test/lex_token5.py b/xonsh/ply/test/lex_token5.py deleted file mode 100644 index ef7a3c5..0000000 --- a/xonsh/ply/test/lex_token5.py +++ /dev/null @@ -1,31 +0,0 @@ -# lex_token5.py -# -# Return a bad token name - -import sys -if ".." 
not in sys.path: sys.path.insert(0,"..") - -import ply.lex as lex - -tokens = [ - "PLUS", - "MINUS", - "NUMBER", - ] - -t_PLUS = r'\+' -t_MINUS = r'-' - -def t_NUMBER(t): - r'\d+' - t.type = "NUM" - return t - -def t_error(t): - pass - -lex.lex() -lex.input("1234") -t = lex.token() - - diff --git a/xonsh/ply/test/lex_token_dup.py b/xonsh/ply/test/lex_token_dup.py deleted file mode 100644 index 384f4e9..0000000 --- a/xonsh/ply/test/lex_token_dup.py +++ /dev/null @@ -1,29 +0,0 @@ -# lex_token_dup.py -# -# Duplicate token name in tokens - -import sys -if ".." not in sys.path: sys.path.insert(0,"..") - -import ply.lex as lex - -tokens = [ - "PLUS", - "MINUS", - "NUMBER", - "MINUS" - ] - -t_PLUS = r'\+' -t_MINUS = r'-' - -def t_NUMBER(t): - r'\d+' - return t - -def t_error(t): - pass - -lex.lex() - - diff --git a/xonsh/ply/test/pkg_test1/__init__.py b/xonsh/ply/test/pkg_test1/__init__.py deleted file mode 100644 index 0e19558..0000000 --- a/xonsh/ply/test/pkg_test1/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Tests proper handling of lextab and parsetab files in package structures - -# Here for testing purposes -import sys -if '..' not in sys.path: - sys.path.insert(0, '..') - -from .parsing.calcparse import parser - diff --git a/xonsh/ply/test/pkg_test1/parsing/__init__.py b/xonsh/ply/test/pkg_test1/parsing/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/xonsh/ply/test/pkg_test1/parsing/calclex.py b/xonsh/ply/test/pkg_test1/parsing/calclex.py deleted file mode 100644 index b3c1a4d..0000000 --- a/xonsh/ply/test/pkg_test1/parsing/calclex.py +++ /dev/null @@ -1,47 +0,0 @@ -# ----------------------------------------------------------------------------- -# calclex.py -# ----------------------------------------------------------------------------- - -import ply.lex as lex - -tokens = ( - 'NAME','NUMBER', - 'PLUS','MINUS','TIMES','DIVIDE','EQUALS', - 'LPAREN','RPAREN', - ) - -# Tokens - -t_PLUS = r'\+' -t_MINUS = r'-' -t_TIMES = r'\*' -t_DIVIDE = r'/' -t_EQUALS = r'=' -t_LPAREN = r'\(' -t_RPAREN = r'\)' -t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*' - -def t_NUMBER(t): - r'\d+' - try: - t.value = int(t.value) - except ValueError: - print("Integer value too large %s" % t.value) - t.value = 0 - return t - -t_ignore = " \t" - -def t_newline(t): - r'\n+' - t.lexer.lineno += t.value.count("\n") - -def t_error(t): - print("Illegal character '%s'" % t.value[0]) - t.lexer.skip(1) - -# Build the lexer -lexer = lex.lex(optimize=True) - - - diff --git a/xonsh/ply/test/pkg_test1/parsing/calcparse.py b/xonsh/ply/test/pkg_test1/parsing/calcparse.py deleted file mode 100644 index c058e9f..0000000 --- a/xonsh/ply/test/pkg_test1/parsing/calcparse.py +++ /dev/null @@ -1,66 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_simple.py -# -# A simple, properly specifier grammar -# ----------------------------------------------------------------------------- - -from .calclex import tokens -from ply import yacc - -# Parsing rules -precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_expr(t): - 'statement : expression' - t[0] = t[1] - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - 
t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_error(t): - print("Syntax error at '%s'" % t.value) - -parser = yacc.yacc() - - - - - diff --git a/xonsh/ply/test/pkg_test2/__init__.py b/xonsh/ply/test/pkg_test2/__init__.py deleted file mode 100644 index 0e19558..0000000 --- a/xonsh/ply/test/pkg_test2/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Tests proper handling of lextab and parsetab files in package structures - -# Here for testing purposes -import sys -if '..' not in sys.path: - sys.path.insert(0, '..') - -from .parsing.calcparse import parser - diff --git a/xonsh/ply/test/pkg_test2/parsing/__init__.py b/xonsh/ply/test/pkg_test2/parsing/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/xonsh/ply/test/pkg_test2/parsing/calclex.py b/xonsh/ply/test/pkg_test2/parsing/calclex.py deleted file mode 100644 index 789e13f..0000000 --- a/xonsh/ply/test/pkg_test2/parsing/calclex.py +++ /dev/null @@ -1,47 +0,0 @@ -# ----------------------------------------------------------------------------- -# calclex.py -# ----------------------------------------------------------------------------- - -import ply.lex as lex - -tokens = ( - 'NAME','NUMBER', - 'PLUS','MINUS','TIMES','DIVIDE','EQUALS', - 'LPAREN','RPAREN', - ) - -# Tokens - -t_PLUS = r'\+' -t_MINUS = r'-' -t_TIMES = r'\*' -t_DIVIDE = r'/' -t_EQUALS = r'=' -t_LPAREN = r'\(' -t_RPAREN = r'\)' -t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*' - -def t_NUMBER(t): - r'\d+' - try: - t.value = int(t.value) - except ValueError: - print("Integer value too large %s" % t.value) - t.value = 0 - return t - -t_ignore = " \t" - -def t_newline(t): - r'\n+' - t.lexer.lineno += t.value.count("\n") - -def t_error(t): - print("Illegal character '%s'" % t.value[0]) - t.lexer.skip(1) - -# Build the lexer -lexer = lex.lex(optimize=True, lextab='calclextab') - - - diff --git a/xonsh/ply/test/pkg_test2/parsing/calcparse.py b/xonsh/ply/test/pkg_test2/parsing/calcparse.py deleted file mode 100644 index f519338..0000000 --- a/xonsh/ply/test/pkg_test2/parsing/calcparse.py +++ /dev/null @@ -1,66 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_simple.py -# -# A simple, properly specifier grammar -# ----------------------------------------------------------------------------- - -from .calclex import tokens -from ply import yacc - -# Parsing rules -precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_expr(t): - 'statement : expression' - t[0] = t[1] - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 
'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_error(t): - print("Syntax error at '%s'" % t.value) - -parser = yacc.yacc(tabmodule='calcparsetab') - - - - - diff --git a/xonsh/ply/test/pkg_test3/__init__.py b/xonsh/ply/test/pkg_test3/__init__.py deleted file mode 100644 index 0e19558..0000000 --- a/xonsh/ply/test/pkg_test3/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Tests proper handling of lextab and parsetab files in package structures - -# Here for testing purposes -import sys -if '..' not in sys.path: - sys.path.insert(0, '..') - -from .parsing.calcparse import parser - diff --git a/xonsh/ply/test/pkg_test3/generated/__init__.py b/xonsh/ply/test/pkg_test3/generated/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/xonsh/ply/test/pkg_test3/parsing/__init__.py b/xonsh/ply/test/pkg_test3/parsing/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/xonsh/ply/test/pkg_test3/parsing/calclex.py b/xonsh/ply/test/pkg_test3/parsing/calclex.py deleted file mode 100644 index 6ca2c4f..0000000 --- a/xonsh/ply/test/pkg_test3/parsing/calclex.py +++ /dev/null @@ -1,47 +0,0 @@ -# ----------------------------------------------------------------------------- -# calclex.py -# ----------------------------------------------------------------------------- - -import ply.lex as lex - -tokens = ( - 'NAME','NUMBER', - 'PLUS','MINUS','TIMES','DIVIDE','EQUALS', - 'LPAREN','RPAREN', - ) - -# Tokens - -t_PLUS = r'\+' -t_MINUS = r'-' -t_TIMES = r'\*' -t_DIVIDE = r'/' -t_EQUALS = r'=' -t_LPAREN = r'\(' -t_RPAREN = r'\)' -t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*' - -def t_NUMBER(t): - r'\d+' - try: - t.value = int(t.value) - except ValueError: - print("Integer value too large %s" % t.value) - t.value = 0 - return t - -t_ignore = " \t" - -def t_newline(t): - r'\n+' - t.lexer.lineno += t.value.count("\n") - -def t_error(t): - print("Illegal character '%s'" % t.value[0]) - t.lexer.skip(1) - -# Build the lexer -lexer = lex.lex(optimize=True, lextab='pkg_test3.generated.lextab') - - - diff --git a/xonsh/ply/test/pkg_test3/parsing/calcparse.py b/xonsh/ply/test/pkg_test3/parsing/calcparse.py deleted file mode 100644 index 2dcb52b..0000000 --- a/xonsh/ply/test/pkg_test3/parsing/calcparse.py +++ /dev/null @@ -1,66 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_simple.py -# -# A simple, properly specifier grammar -# ----------------------------------------------------------------------------- - -from .calclex import tokens -from ply import yacc - -# Parsing rules -precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_expr(t): - 'statement : expression' - t[0] = t[1] - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : 
LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_error(t): - print("Syntax error at '%s'" % t.value) - -parser = yacc.yacc(tabmodule='pkg_test3.generated.parsetab') - - - - - diff --git a/xonsh/ply/test/pkg_test4/__init__.py b/xonsh/ply/test/pkg_test4/__init__.py deleted file mode 100644 index ba9ddac..0000000 --- a/xonsh/ply/test/pkg_test4/__init__.py +++ /dev/null @@ -1,25 +0,0 @@ -# Tests proper handling of lextab and parsetab files in package structures -# Check of warning messages when files aren't writable - -# Here for testing purposes -import sys -if '..' not in sys.path: - sys.path.insert(0, '..') - -import ply.lex -import ply.yacc - -def patched_open(filename, mode): - if 'w' in mode: - raise IOError("Permission denied %r" % filename) - return open(filename, mode) - -ply.lex.open = patched_open -ply.yacc.open = patched_open -try: - from .parsing.calcparse import parser -finally: - del ply.lex.open - del ply.yacc.open - - diff --git a/xonsh/ply/test/pkg_test4/parsing/__init__.py b/xonsh/ply/test/pkg_test4/parsing/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/xonsh/ply/test/pkg_test4/parsing/calclex.py b/xonsh/ply/test/pkg_test4/parsing/calclex.py deleted file mode 100644 index b3c1a4d..0000000 --- a/xonsh/ply/test/pkg_test4/parsing/calclex.py +++ /dev/null @@ -1,47 +0,0 @@ -# ----------------------------------------------------------------------------- -# calclex.py -# ----------------------------------------------------------------------------- - -import ply.lex as lex - -tokens = ( - 'NAME','NUMBER', - 'PLUS','MINUS','TIMES','DIVIDE','EQUALS', - 'LPAREN','RPAREN', - ) - -# Tokens - -t_PLUS = r'\+' -t_MINUS = r'-' -t_TIMES = r'\*' -t_DIVIDE = r'/' -t_EQUALS = r'=' -t_LPAREN = r'\(' -t_RPAREN = r'\)' -t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*' - -def t_NUMBER(t): - r'\d+' - try: - t.value = int(t.value) - except ValueError: - print("Integer value too large %s" % t.value) - t.value = 0 - return t - -t_ignore = " \t" - -def t_newline(t): - r'\n+' - t.lexer.lineno += t.value.count("\n") - -def t_error(t): - print("Illegal character '%s'" % t.value[0]) - t.lexer.skip(1) - -# Build the lexer -lexer = lex.lex(optimize=True) - - - diff --git a/xonsh/ply/test/pkg_test4/parsing/calcparse.py b/xonsh/ply/test/pkg_test4/parsing/calcparse.py deleted file mode 100644 index c058e9f..0000000 --- a/xonsh/ply/test/pkg_test4/parsing/calcparse.py +++ /dev/null @@ -1,66 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_simple.py -# -# A simple, properly specifier grammar -# ----------------------------------------------------------------------------- - -from .calclex import tokens -from ply import yacc - -# Parsing rules -precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_expr(t): - 'statement : expression' - t[0] = t[1] - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = 
t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_error(t): - print("Syntax error at '%s'" % t.value) - -parser = yacc.yacc() - - - - - diff --git a/xonsh/ply/test/pkg_test5/__init__.py b/xonsh/ply/test/pkg_test5/__init__.py deleted file mode 100644 index 0e19558..0000000 --- a/xonsh/ply/test/pkg_test5/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Tests proper handling of lextab and parsetab files in package structures - -# Here for testing purposes -import sys -if '..' not in sys.path: - sys.path.insert(0, '..') - -from .parsing.calcparse import parser - diff --git a/xonsh/ply/test/pkg_test5/parsing/__init__.py b/xonsh/ply/test/pkg_test5/parsing/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/xonsh/ply/test/pkg_test5/parsing/calclex.py b/xonsh/ply/test/pkg_test5/parsing/calclex.py deleted file mode 100644 index e8759b6..0000000 --- a/xonsh/ply/test/pkg_test5/parsing/calclex.py +++ /dev/null @@ -1,48 +0,0 @@ -# ----------------------------------------------------------------------------- -# calclex.py -# ----------------------------------------------------------------------------- - -import ply.lex as lex - -tokens = ( - 'NAME','NUMBER', - 'PLUS','MINUS','TIMES','DIVIDE','EQUALS', - 'LPAREN','RPAREN', - ) - -# Tokens - -t_PLUS = r'\+' -t_MINUS = r'-' -t_TIMES = r'\*' -t_DIVIDE = r'/' -t_EQUALS = r'=' -t_LPAREN = r'\(' -t_RPAREN = r'\)' -t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*' - -def t_NUMBER(t): - r'\d+' - try: - t.value = int(t.value) - except ValueError: - print("Integer value too large %s" % t.value) - t.value = 0 - return t - -t_ignore = " \t" - -def t_newline(t): - r'\n+' - t.lexer.lineno += t.value.count("\n") - -def t_error(t): - print("Illegal character '%s'" % t.value[0]) - t.lexer.skip(1) - -# Build the lexer -import os.path -lexer = lex.lex(optimize=True, outputdir=os.path.dirname(__file__)) - - - diff --git a/xonsh/ply/test/pkg_test5/parsing/calcparse.py b/xonsh/ply/test/pkg_test5/parsing/calcparse.py deleted file mode 100644 index 2a1ddfe..0000000 --- a/xonsh/ply/test/pkg_test5/parsing/calcparse.py +++ /dev/null @@ -1,67 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_simple.py -# -# A simple, properly specifier grammar -# ----------------------------------------------------------------------------- - -from .calclex import tokens -from ply import yacc - -# Parsing rules -precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_expr(t): - 'statement : expression' - t[0] = t[1] - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - 
t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_error(t): - print("Syntax error at '%s'" % t.value) - -import os.path -parser = yacc.yacc(outputdir=os.path.dirname(__file__)) - - - - - diff --git a/xonsh/ply/test/pkg_test6/__init__.py b/xonsh/ply/test/pkg_test6/__init__.py deleted file mode 100644 index 5dbe0cb..0000000 --- a/xonsh/ply/test/pkg_test6/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Tests proper sorting of modules in yacc.ParserReflect.get_pfunctions - -# Here for testing purposes -import sys -if '..' not in sys.path: - sys.path.insert(0, '..') - -from .parsing.calcparse import parser - diff --git a/xonsh/ply/test/pkg_test6/parsing/__init__.py b/xonsh/ply/test/pkg_test6/parsing/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/xonsh/ply/test/pkg_test6/parsing/calclex.py b/xonsh/ply/test/pkg_test6/parsing/calclex.py deleted file mode 100644 index e8759b6..0000000 --- a/xonsh/ply/test/pkg_test6/parsing/calclex.py +++ /dev/null @@ -1,48 +0,0 @@ -# ----------------------------------------------------------------------------- -# calclex.py -# ----------------------------------------------------------------------------- - -import ply.lex as lex - -tokens = ( - 'NAME','NUMBER', - 'PLUS','MINUS','TIMES','DIVIDE','EQUALS', - 'LPAREN','RPAREN', - ) - -# Tokens - -t_PLUS = r'\+' -t_MINUS = r'-' -t_TIMES = r'\*' -t_DIVIDE = r'/' -t_EQUALS = r'=' -t_LPAREN = r'\(' -t_RPAREN = r'\)' -t_NAME = r'[a-zA-Z_][a-zA-Z0-9_]*' - -def t_NUMBER(t): - r'\d+' - try: - t.value = int(t.value) - except ValueError: - print("Integer value too large %s" % t.value) - t.value = 0 - return t - -t_ignore = " \t" - -def t_newline(t): - r'\n+' - t.lexer.lineno += t.value.count("\n") - -def t_error(t): - print("Illegal character '%s'" % t.value[0]) - t.lexer.skip(1) - -# Build the lexer -import os.path -lexer = lex.lex(optimize=True, outputdir=os.path.dirname(__file__)) - - - diff --git a/xonsh/ply/test/pkg_test6/parsing/calcparse.py b/xonsh/ply/test/pkg_test6/parsing/calcparse.py deleted file mode 100644 index 6defaf9..0000000 --- a/xonsh/ply/test/pkg_test6/parsing/calcparse.py +++ /dev/null @@ -1,33 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_simple.py -# -# A simple, properly specifier grammar -# ----------------------------------------------------------------------------- - -from .calclex import tokens -from ply import yacc - -# Parsing rules -precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -from .statement import * - -from .expression import * - -def p_error(t): - print("Syntax error at '%s'" % t.value) - -import os.path -parser = yacc.yacc(outputdir=os.path.dirname(__file__)) - - - - - diff --git a/xonsh/ply/test/pkg_test6/parsing/expression.py b/xonsh/ply/test/pkg_test6/parsing/expression.py deleted file mode 100644 index 028f662..0000000 --- a/xonsh/ply/test/pkg_test6/parsing/expression.py +++ /dev/null @@ -1,31 +0,0 @@ -# This file contains definitions of expression grammar - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * 
t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 diff --git a/xonsh/ply/test/pkg_test6/parsing/statement.py b/xonsh/ply/test/pkg_test6/parsing/statement.py deleted file mode 100644 index ef7dc55..0000000 --- a/xonsh/ply/test/pkg_test6/parsing/statement.py +++ /dev/null @@ -1,9 +0,0 @@ -# This file contains definitions of statement grammar - -def p_statement_assign(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_expr(t): - 'statement : expression' - t[0] = t[1] diff --git a/xonsh/ply/test/test_cpp_nonascii.c b/xonsh/ply/test/test_cpp_nonascii.c deleted file mode 100644 index 3e97d81..0000000 --- a/xonsh/ply/test/test_cpp_nonascii.c +++ /dev/null @@ -1,2 +0,0 @@ -/* ë */ -#define x 1 \ No newline at end of file diff --git a/xonsh/ply/test/testcpp.py b/xonsh/ply/test/testcpp.py deleted file mode 100644 index dbfb3e4..0000000 --- a/xonsh/ply/test/testcpp.py +++ /dev/null @@ -1,153 +0,0 @@ -from unittest import TestCase, main - -from multiprocessing import Process, Queue -from six.moves.queue import Empty - -import sys -import locale - -if ".." not in sys.path: - sys.path.insert(0, "..") - -from ply.lex import lex -from ply.cpp import * - - -def preprocessing(in_, out_queue): - out = None - - try: - p = Preprocessor(lex()) - p.parse(in_) - tokens = [t.value for t in p.parser] - out = "".join(tokens) - finally: - out_queue.put(out) - -class CPPTests(TestCase): - "Tests related to ANSI-C style lexical preprocessor." - - def __test_preprocessing(self, in_, expected, time_limit = 1.0): - out_queue = Queue() - - preprocessor = Process( - name = "PLY`s C preprocessor", - target = preprocessing, - args = (in_, out_queue) - ) - - preprocessor.start() - - try: - out = out_queue.get(timeout = time_limit) - except Empty: - preprocessor.terminate() - raise RuntimeError("Time limit exceeded!") - else: - self.assertMultiLineEqual(out, expected) - - def test_infinite_argument_expansion(self): - # CPP does not drags set of currently expanded macros through macro - # arguments expansion. If there is a match between an argument value - # and name of an already expanded macro then CPP falls into infinite - # recursion. - self.__test_preprocessing("""\ -#define a(x) x -#define b a(b) -b -""" , """\ - - -b""" - ) - - - def test_concatenation(self): - self.__test_preprocessing("""\ -#define a(x) x##_ -#define b(x) _##x -#define c(x) _##x##_ -#define d(x,y) _##x##y##_ - -a(i) -b(j) -c(k) -d(q,s)""" - , """\ - - - - - -i_ -_j -_k_ -_qs_""" - ) - - def test_deadloop_macro(self): - # If there is a word which equals to name of a parametrized macro, then - # attempt to expand such word as a macro manages the parser to fall - # into an infinite loop. - - self.__test_preprocessing("""\ -#define a(x) x - -a;""" - , """\ - - -a;""" - ) - - def test_index_error(self): - # If there are no tokens after a word ("a") which equals to name of - # a parameterized macro, then attempt to expand this word leads to - # IndexError. 
- - self.__test_preprocessing("""\ -#define a(x) x - -a""" - , """\ - - -a""" - ) - - def test_evalexpr(self): - # #if 1 != 2 is not processed correctly; undefined values are converted - # to 0L instead of 0 (issue #195) - # - self.__test_preprocessing("""\ -#if (1!=0) && (!x || (!(1==2))) -a; -#else -b; -#endif -""" - , """\ - -a; - -""" - ) - - def test_include_nonascii(self): - # Issue #196: #included files are read using the current locale's - # getdefaultencoding. if a #included file contains non-ascii characters, - # while default encoding is e.g. US_ASCII, this causes an error - locale.setlocale(locale.LC_ALL, 'C') - self.__test_preprocessing("""\ -#include "test_cpp_nonascii.c" -x; - -""" - , """\ - - -1; -""" - ) - -main() diff --git a/xonsh/ply/test/testlex.py b/xonsh/ply/test/testlex.py deleted file mode 100755 index a94ed64..0000000 --- a/xonsh/ply/test/testlex.py +++ /dev/null @@ -1,682 +0,0 @@ -# testlex.py - -import unittest -try: - import StringIO -except ImportError: - import io as StringIO - -import sys -import os -import warnings -import platform - -sys.path.insert(0,"..") -sys.tracebacklimit = 0 - -import ply.lex - -try: - from importlib.util import cache_from_source -except ImportError: - # Python 2.7, but we don't care. - cache_from_source = None - - -def make_pymodule_path(filename, optimization=None): - path = os.path.dirname(filename) - file = os.path.basename(filename) - mod, ext = os.path.splitext(file) - - if sys.hexversion >= 0x3050000: - fullpath = cache_from_source(filename, optimization=optimization) - elif sys.hexversion >= 0x3040000: - fullpath = cache_from_source(filename, ext=='.pyc') - elif sys.hexversion >= 0x3020000: - import imp - modname = mod+"."+imp.get_tag()+ext - fullpath = os.path.join(path,'__pycache__',modname) - else: - fullpath = filename - return fullpath - -def pymodule_out_exists(filename, optimization=None): - return os.path.exists(make_pymodule_path(filename, - optimization=optimization)) - -def pymodule_out_remove(filename, optimization=None): - os.remove(make_pymodule_path(filename, optimization=optimization)) - -def implementation(): - if platform.system().startswith("Java"): - return "Jython" - elif hasattr(sys, "pypy_version_info"): - return "PyPy" - else: - return "CPython" - -test_pyo = (implementation() == 'CPython') - -def check_expected(result, expected, contains=False): - if sys.version_info[0] >= 3: - if isinstance(result,str): - result = result.encode('ascii') - if isinstance(expected,str): - expected = expected.encode('ascii') - resultlines = result.splitlines() - expectedlines = expected.splitlines() - - if len(resultlines) != len(expectedlines): - return False - - for rline,eline in zip(resultlines,expectedlines): - if contains: - if eline not in rline: - return False - else: - if not rline.endswith(eline): - return False - return True - -def run_import(module): - code = "import "+module - exec(code) - del sys.modules[module] - -# Tests related to errors and warnings when building lexers -class LexErrorWarningTests(unittest.TestCase): - def setUp(self): - sys.stderr = StringIO.StringIO() - sys.stdout = StringIO.StringIO() - if sys.hexversion >= 0x3020000: - warnings.filterwarnings('ignore',category=ResourceWarning) - - def tearDown(self): - sys.stderr = sys.__stderr__ - sys.stdout = sys.__stdout__ - def test_lex_doc1(self): - self.assertRaises(SyntaxError,run_import,"lex_doc1") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "lex_doc1.py:18: No regular expression defined for rule 't_NUMBER'\n")) 
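[Note, not part of the patch: the error-and-warning tests in this deleted testlex.py all follow one recipe — run_import() imports a module containing a deliberately broken lexer spec, ply.lex.lex() reflects over that module's globals, and the test asserts on the raised SyntaxError plus the captured stderr text. A minimal, self-contained sketch of that failure mode outside the harness is shown below; the module framing and printed message are illustrative assumptions, but the undeclared-token diagnostic is the one asserted by test_lex_token3 further down.

    # hypothetical sketch, not one of the deleted test modules
    import ply.lex as lex

    tokens = ["NUMBER"]        # 'PLUS' is deliberately left out of the list
    t_NUMBER = r'\d+'
    t_PLUS = r'\+'             # rule for an unspecified token -> build error

    def t_error(t):
        t.lexer.skip(1)

    try:
        lex.lex()              # reflects over this module's t_* globals
    except SyntaxError as exc: # the same exception assertRaises expects
        print("lexer build failed:", exc)
]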
- def test_lex_dup1(self): - self.assertRaises(SyntaxError,run_import,"lex_dup1") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "lex_dup1.py:20: Rule t_NUMBER redefined. Previously defined on line 18\n" )) - - def test_lex_dup2(self): - self.assertRaises(SyntaxError,run_import,"lex_dup2") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "lex_dup2.py:22: Rule t_NUMBER redefined. Previously defined on line 18\n" )) - - def test_lex_dup3(self): - self.assertRaises(SyntaxError,run_import,"lex_dup3") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "lex_dup3.py:20: Rule t_NUMBER redefined. Previously defined on line 18\n" )) - - def test_lex_empty(self): - self.assertRaises(SyntaxError,run_import,"lex_empty") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "No rules of the form t_rulename are defined\n" - "No rules defined for state 'INITIAL'\n")) - - def test_lex_error1(self): - run_import("lex_error1") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "No t_error rule is defined\n")) - - def test_lex_error2(self): - self.assertRaises(SyntaxError,run_import,"lex_error2") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "Rule 't_error' must be defined as a function\n") - ) - - def test_lex_error3(self): - self.assertRaises(SyntaxError,run_import,"lex_error3") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "lex_error3.py:20: Rule 't_error' requires an argument\n")) - - def test_lex_error4(self): - self.assertRaises(SyntaxError,run_import,"lex_error4") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "lex_error4.py:20: Rule 't_error' has too many arguments\n")) - - def test_lex_ignore(self): - self.assertRaises(SyntaxError,run_import,"lex_ignore") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "lex_ignore.py:20: Rule 't_ignore' must be defined as a string\n")) - - def test_lex_ignore2(self): - run_import("lex_ignore2") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "t_ignore contains a literal backslash '\\'\n")) - - - def test_lex_re1(self): - self.assertRaises(SyntaxError,run_import,"lex_re1") - result = sys.stderr.getvalue() - if sys.hexversion < 0x3050000: - msg = "Invalid regular expression for rule 't_NUMBER'. unbalanced parenthesis\n" - else: - msg = "Invalid regular expression for rule 't_NUMBER'. missing ), unterminated subpattern at position 0" - self.assert_(check_expected(result, - msg, - contains=True)) - - def test_lex_re2(self): - self.assertRaises(SyntaxError,run_import,"lex_re2") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "Regular expression for rule 't_PLUS' matches empty string\n")) - - def test_lex_re3(self): - self.assertRaises(SyntaxError,run_import,"lex_re3") - result = sys.stderr.getvalue() -# self.assert_(check_expected(result, -# "Invalid regular expression for rule 't_POUND'. unbalanced parenthesis\n" -# "Make sure '#' in rule 't_POUND' is escaped with '\\#'\n")) - - if sys.hexversion < 0x3050000: - msg = ("Invalid regular expression for rule 't_POUND'. unbalanced parenthesis\n" - "Make sure '#' in rule 't_POUND' is escaped with '\\#'\n") - else: - msg = ("Invalid regular expression for rule 't_POUND'. 
missing ), unterminated subpattern at position 0\n" - "ERROR: Make sure '#' in rule 't_POUND' is escaped with '\#'") - self.assert_(check_expected(result, - msg, - contains=True), result) - - def test_lex_rule1(self): - self.assertRaises(SyntaxError,run_import,"lex_rule1") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "t_NUMBER not defined as a function or string\n")) - - def test_lex_rule2(self): - self.assertRaises(SyntaxError,run_import,"lex_rule2") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "lex_rule2.py:18: Rule 't_NUMBER' requires an argument\n")) - - def test_lex_rule3(self): - self.assertRaises(SyntaxError,run_import,"lex_rule3") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "lex_rule3.py:18: Rule 't_NUMBER' has too many arguments\n")) - - - def test_lex_state1(self): - self.assertRaises(SyntaxError,run_import,"lex_state1") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "states must be defined as a tuple or list\n")) - - def test_lex_state2(self): - self.assertRaises(SyntaxError,run_import,"lex_state2") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "Invalid state specifier 'comment'. Must be a tuple (statename,'exclusive|inclusive')\n" - "Invalid state specifier 'example'. Must be a tuple (statename,'exclusive|inclusive')\n")) - - def test_lex_state3(self): - self.assertRaises(SyntaxError,run_import,"lex_state3") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "State name 1 must be a string\n" - "No rules defined for state 'example'\n")) - - def test_lex_state4(self): - self.assertRaises(SyntaxError,run_import,"lex_state4") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "State type for state comment must be 'inclusive' or 'exclusive'\n")) - - - def test_lex_state5(self): - self.assertRaises(SyntaxError,run_import,"lex_state5") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "State 'comment' already defined\n")) - - def test_lex_state_noerror(self): - run_import("lex_state_noerror") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "No error rule is defined for exclusive state 'comment'\n")) - - def test_lex_state_norule(self): - self.assertRaises(SyntaxError,run_import,"lex_state_norule") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "No rules defined for state 'example'\n")) - - def test_lex_token1(self): - self.assertRaises(SyntaxError,run_import,"lex_token1") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "No token list is defined\n" - "Rule 't_NUMBER' defined for an unspecified token NUMBER\n" - "Rule 't_PLUS' defined for an unspecified token PLUS\n" - "Rule 't_MINUS' defined for an unspecified token MINUS\n" -)) - - def test_lex_token2(self): - self.assertRaises(SyntaxError,run_import,"lex_token2") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "tokens must be a list or tuple\n" - "Rule 't_NUMBER' defined for an unspecified token NUMBER\n" - "Rule 't_PLUS' defined for an unspecified token PLUS\n" - "Rule 't_MINUS' defined for an unspecified token MINUS\n" -)) - - def test_lex_token3(self): - self.assertRaises(SyntaxError,run_import,"lex_token3") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "Rule 't_MINUS' defined for an unspecified token MINUS\n")) - - - def test_lex_token4(self): - self.assertRaises(SyntaxError,run_import,"lex_token4") - 
result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "Bad token name '-'\n")) - - - def test_lex_token5(self): - try: - run_import("lex_token5") - except ply.lex.LexError: - e = sys.exc_info()[1] - self.assert_(check_expected(str(e),"lex_token5.py:19: Rule 't_NUMBER' returned an unknown token type 'NUM'")) - - def test_lex_token_dup(self): - run_import("lex_token_dup") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "Token 'MINUS' multiply defined\n")) - - - def test_lex_literal1(self): - self.assertRaises(SyntaxError,run_import,"lex_literal1") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "Invalid literal '**'. Must be a single character\n")) - - def test_lex_literal2(self): - self.assertRaises(SyntaxError,run_import,"lex_literal2") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "Invalid literals specification. literals must be a sequence of characters\n")) - -import os -import subprocess -import shutil - -# Tests related to various build options associated with lexers -class LexBuildOptionTests(unittest.TestCase): - def setUp(self): - sys.stderr = StringIO.StringIO() - sys.stdout = StringIO.StringIO() - def tearDown(self): - sys.stderr = sys.__stderr__ - sys.stdout = sys.__stdout__ - try: - shutil.rmtree("lexdir") - except OSError: - pass - - def test_lex_module(self): - run_import("lex_module") - result = sys.stdout.getvalue() - self.assert_(check_expected(result, - "(NUMBER,3,1,0)\n" - "(PLUS,'+',1,1)\n" - "(NUMBER,4,1,2)\n")) - - def test_lex_object(self): - run_import("lex_object") - result = sys.stdout.getvalue() - self.assert_(check_expected(result, - "(NUMBER,3,1,0)\n" - "(PLUS,'+',1,1)\n" - "(NUMBER,4,1,2)\n")) - - def test_lex_closure(self): - run_import("lex_closure") - result = sys.stdout.getvalue() - self.assert_(check_expected(result, - "(NUMBER,3,1,0)\n" - "(PLUS,'+',1,1)\n" - "(NUMBER,4,1,2)\n")) - - def test_lex_optimize(self): - try: - os.remove("lextab.py") - except OSError: - pass - try: - os.remove("lextab.pyc") - except OSError: - pass - try: - os.remove("lextab.pyo") - except OSError: - pass - run_import("lex_optimize") - - result = sys.stdout.getvalue() - self.assert_(check_expected(result, - "(NUMBER,3,1,0)\n" - "(PLUS,'+',1,1)\n" - "(NUMBER,4,1,2)\n")) - self.assert_(os.path.exists("lextab.py")) - - p = subprocess.Popen([sys.executable,'-O','lex_optimize.py'], - stdout=subprocess.PIPE) - result = p.stdout.read() - - self.assert_(check_expected(result, - "(NUMBER,3,1,0)\n" - "(PLUS,'+',1,1)\n" - "(NUMBER,4,1,2)\n")) - if test_pyo: - self.assert_(pymodule_out_exists("lextab.pyo", 1)) - pymodule_out_remove("lextab.pyo", 1) - - p = subprocess.Popen([sys.executable,'-OO','lex_optimize.py'], - stdout=subprocess.PIPE) - result = p.stdout.read() - self.assert_(check_expected(result, - "(NUMBER,3,1,0)\n" - "(PLUS,'+',1,1)\n" - "(NUMBER,4,1,2)\n")) - - if test_pyo: - self.assert_(pymodule_out_exists("lextab.pyo", 2)) - try: - os.remove("lextab.py") - except OSError: - pass - try: - pymodule_out_remove("lextab.pyc") - except OSError: - pass - try: - pymodule_out_remove("lextab.pyo", 2) - except OSError: - pass - - def test_lex_optimize2(self): - try: - os.remove("opt2tab.py") - except OSError: - pass - try: - os.remove("opt2tab.pyc") - except OSError: - pass - try: - os.remove("opt2tab.pyo") - except OSError: - pass - run_import("lex_optimize2") - result = sys.stdout.getvalue() - self.assert_(check_expected(result, - "(NUMBER,3,1,0)\n" - "(PLUS,'+',1,1)\n" - "(NUMBER,4,1,2)\n")) 
- self.assert_(os.path.exists("opt2tab.py")) - - p = subprocess.Popen([sys.executable,'-O','lex_optimize2.py'], - stdout=subprocess.PIPE) - result = p.stdout.read() - self.assert_(check_expected(result, - "(NUMBER,3,1,0)\n" - "(PLUS,'+',1,1)\n" - "(NUMBER,4,1,2)\n")) - if test_pyo: - self.assert_(pymodule_out_exists("opt2tab.pyo", 1)) - pymodule_out_remove("opt2tab.pyo", 1) - p = subprocess.Popen([sys.executable,'-OO','lex_optimize2.py'], - stdout=subprocess.PIPE) - result = p.stdout.read() - self.assert_(check_expected(result, - "(NUMBER,3,1,0)\n" - "(PLUS,'+',1,1)\n" - "(NUMBER,4,1,2)\n")) - if test_pyo: - self.assert_(pymodule_out_exists("opt2tab.pyo", 2)) - try: - os.remove("opt2tab.py") - except OSError: - pass - try: - pymodule_out_remove("opt2tab.pyc") - except OSError: - pass - try: - pymodule_out_remove("opt2tab.pyo", 2) - except OSError: - pass - - def test_lex_optimize3(self): - try: - shutil.rmtree("lexdir") - except OSError: - pass - - os.mkdir("lexdir") - os.mkdir("lexdir/sub") - with open("lexdir/__init__.py","w") as f: - f.write("") - with open("lexdir/sub/__init__.py","w") as f: - f.write("") - run_import("lex_optimize3") - result = sys.stdout.getvalue() - self.assert_(check_expected(result, - "(NUMBER,3,1,0)\n" - "(PLUS,'+',1,1)\n" - "(NUMBER,4,1,2)\n")) - self.assert_(os.path.exists("lexdir/sub/calctab.py")) - - p = subprocess.Popen([sys.executable,'-O','lex_optimize3.py'], - stdout=subprocess.PIPE) - result = p.stdout.read() - self.assert_(check_expected(result, - "(NUMBER,3,1,0)\n" - "(PLUS,'+',1,1)\n" - "(NUMBER,4,1,2)\n")) - if test_pyo: - self.assert_(pymodule_out_exists("lexdir/sub/calctab.pyo", 1)) - pymodule_out_remove("lexdir/sub/calctab.pyo", 1) - - p = subprocess.Popen([sys.executable,'-OO','lex_optimize3.py'], - stdout=subprocess.PIPE) - result = p.stdout.read() - self.assert_(check_expected(result, - "(NUMBER,3,1,0)\n" - "(PLUS,'+',1,1)\n" - "(NUMBER,4,1,2)\n")) - if test_pyo: - self.assert_(pymodule_out_exists("lexdir/sub/calctab.pyo", 2)) - try: - shutil.rmtree("lexdir") - except OSError: - pass - - def test_lex_optimize4(self): - - # Regression test to make sure that reflags works correctly - # on Python 3. 
- - for extension in ['py', 'pyc']: - try: - os.remove("opt4tab.{0}".format(extension)) - except OSError: - pass - - run_import("lex_optimize4") - run_import("lex_optimize4") - - for extension in ['py', 'pyc']: - try: - os.remove("opt4tab.{0}".format(extension)) - except OSError: - pass - - def test_lex_opt_alias(self): - try: - os.remove("aliastab.py") - except OSError: - pass - try: - os.remove("aliastab.pyc") - except OSError: - pass - try: - os.remove("aliastab.pyo") - except OSError: - pass - run_import("lex_opt_alias") - result = sys.stdout.getvalue() - self.assert_(check_expected(result, - "(NUMBER,3,1,0)\n" - "(+,'+',1,1)\n" - "(NUMBER,4,1,2)\n")) - self.assert_(os.path.exists("aliastab.py")) - - p = subprocess.Popen([sys.executable,'-O','lex_opt_alias.py'], - stdout=subprocess.PIPE) - result = p.stdout.read() - self.assert_(check_expected(result, - "(NUMBER,3,1,0)\n" - "(+,'+',1,1)\n" - "(NUMBER,4,1,2)\n")) - if test_pyo: - self.assert_(pymodule_out_exists("aliastab.pyo", 1)) - pymodule_out_remove("aliastab.pyo", 1) - - p = subprocess.Popen([sys.executable,'-OO','lex_opt_alias.py'], - stdout=subprocess.PIPE) - result = p.stdout.read() - self.assert_(check_expected(result, - "(NUMBER,3,1,0)\n" - "(+,'+',1,1)\n" - "(NUMBER,4,1,2)\n")) - - if test_pyo: - self.assert_(pymodule_out_exists("aliastab.pyo", 2)) - try: - os.remove("aliastab.py") - except OSError: - pass - try: - pymodule_out_remove("aliastab.pyc") - except OSError: - pass - try: - pymodule_out_remove("aliastab.pyo", 2) - except OSError: - pass - - def test_lex_many_tokens(self): - try: - os.remove("manytab.py") - except OSError: - pass - try: - os.remove("manytab.pyc") - except OSError: - pass - try: - os.remove("manytab.pyo") - except OSError: - pass - run_import("lex_many_tokens") - result = sys.stdout.getvalue() - self.assert_(check_expected(result, - "(TOK34,'TOK34:',1,0)\n" - "(TOK143,'TOK143:',1,7)\n" - "(TOK269,'TOK269:',1,15)\n" - "(TOK372,'TOK372:',1,23)\n" - "(TOK452,'TOK452:',1,31)\n" - "(TOK561,'TOK561:',1,39)\n" - "(TOK999,'TOK999:',1,47)\n" - )) - - self.assert_(os.path.exists("manytab.py")) - - if implementation() == 'CPython': - p = subprocess.Popen([sys.executable,'-O','lex_many_tokens.py'], - stdout=subprocess.PIPE) - result = p.stdout.read() - self.assert_(check_expected(result, - "(TOK34,'TOK34:',1,0)\n" - "(TOK143,'TOK143:',1,7)\n" - "(TOK269,'TOK269:',1,15)\n" - "(TOK372,'TOK372:',1,23)\n" - "(TOK452,'TOK452:',1,31)\n" - "(TOK561,'TOK561:',1,39)\n" - "(TOK999,'TOK999:',1,47)\n" - )) - - self.assert_(pymodule_out_exists("manytab.pyo", 1)) - pymodule_out_remove("manytab.pyo", 1) - try: - os.remove("manytab.py") - except OSError: - pass - try: - os.remove("manytab.pyc") - except OSError: - pass - try: - os.remove("manytab.pyo") - except OSError: - pass - -# Tests related to run-time behavior of lexers -class LexRunTests(unittest.TestCase): - def setUp(self): - sys.stderr = StringIO.StringIO() - sys.stdout = StringIO.StringIO() - def tearDown(self): - sys.stderr = sys.__stderr__ - sys.stdout = sys.__stdout__ - - def test_lex_hedit(self): - run_import("lex_hedit") - result = sys.stdout.getvalue() - self.assert_(check_expected(result, - "(H_EDIT_DESCRIPTOR,'abc',1,0)\n" - "(H_EDIT_DESCRIPTOR,'abcdefghij',1,6)\n" - "(H_EDIT_DESCRIPTOR,'xy',1,20)\n")) - - def test_lex_state_try(self): - run_import("lex_state_try") - result = sys.stdout.getvalue() - self.assert_(check_expected(result, - "(NUMBER,'3',1,0)\n" - "(PLUS,'+',1,2)\n" - "(NUMBER,'4',1,4)\n" - "Entering comment state\n" - "comment body 
LexToken(body_part,'This is a comment */',1,9)\n" - "(PLUS,'+',1,30)\n" - "(NUMBER,'10',1,32)\n" - )) - - - -unittest.main() diff --git a/xonsh/ply/test/testyacc.py b/xonsh/ply/test/testyacc.py deleted file mode 100644 index 7e69f09..0000000 --- a/xonsh/ply/test/testyacc.py +++ /dev/null @@ -1,452 +0,0 @@ -# testyacc.py - -import unittest -try: - import StringIO -except ImportError: - import io as StringIO - -import sys -import os -import warnings -import re -import platform - -sys.path.insert(0,"..") -sys.tracebacklimit = 0 - -import ply.yacc - -def make_pymodule_path(filename): - path = os.path.dirname(filename) - file = os.path.basename(filename) - mod, ext = os.path.splitext(file) - - if sys.hexversion >= 0x3040000: - import importlib.util - fullpath = importlib.util.cache_from_source(filename, ext=='.pyc') - elif sys.hexversion >= 0x3020000: - import imp - modname = mod+"."+imp.get_tag()+ext - fullpath = os.path.join(path,'__pycache__',modname) - else: - fullpath = filename - return fullpath - -def pymodule_out_exists(filename): - return os.path.exists(make_pymodule_path(filename)) - -def pymodule_out_remove(filename): - os.remove(make_pymodule_path(filename)) - -def implementation(): - if platform.system().startswith("Java"): - return "Jython" - elif hasattr(sys, "pypy_version_info"): - return "PyPy" - else: - return "CPython" - -# Check the output to see if it contains all of a set of expected output lines. -# This alternate implementation looks weird, but is needed to properly handle -# some variations in error message order that occurs due to dict hash table -# randomization that was introduced in Python 3.3 -def check_expected(result, expected): - # Normalize 'state n' text to account for randomization effects in Python 3.3 - expected = re.sub(r' state \d+', 'state ', expected) - result = re.sub(r' state \d+', 'state ', result) - - resultlines = set() - for line in result.splitlines(): - if line.startswith("WARNING: "): - line = line[9:] - elif line.startswith("ERROR: "): - line = line[7:] - resultlines.add(line) - - # Selectively remove expected lines from the output - for eline in expected.splitlines(): - resultlines = set(line for line in resultlines if not line.endswith(eline)) - - # Return True if no result lines remain - return not bool(resultlines) - -def run_import(module): - code = "import "+module - exec(code) - del sys.modules[module] - -# Tests related to errors and warnings when building parsers -class YaccErrorWarningTests(unittest.TestCase): - def setUp(self): - sys.stderr = StringIO.StringIO() - sys.stdout = StringIO.StringIO() - try: - os.remove("parsetab.py") - pymodule_out_remove("parsetab.pyc") - except OSError: - pass - - if sys.hexversion >= 0x3020000: - warnings.filterwarnings('ignore', category=ResourceWarning) - warnings.filterwarnings('ignore', category=DeprecationWarning) - - def tearDown(self): - sys.stderr = sys.__stderr__ - sys.stdout = sys.__stdout__ - def test_yacc_badargs(self): - self.assertRaises(ply.yacc.YaccError,run_import,"yacc_badargs") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "yacc_badargs.py:23: Rule 'p_statement_assign' has too many arguments\n" - "yacc_badargs.py:27: Rule 'p_statement_expr' requires an argument\n" - )) - def test_yacc_badid(self): - self.assertRaises(ply.yacc.YaccError,run_import,"yacc_badid") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "yacc_badid.py:32: Illegal name 'bad&rule' in rule 'statement'\n" - "yacc_badid.py:36: Illegal rule name 'bad&rule'\n" - )) - 
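[Note, not part of the patch: the yacc checks in this deleted testyacc.py mirror the lexer ones — import a module with a flawed grammar, catch ply.yacc.YaccError, and match the normalized stderr output. As a hedged sketch of the smallest trigger (an illustrative module, assuming only the ply.yacc behavior these tests themselves exercise), a production docstring missing its ':' separator yields the "Syntax error. Expected ':'" diagnostic that test_yacc_badrule asserts below.

    # hypothetical sketch, not one of the deleted test modules
    import ply.yacc as yacc

    tokens = ("NAME",)          # token list picked up by reflection

    def p_statement(p):
        "statement  NAME"       # missing ':' after the nonterminal
        p[0] = p[1]

    def p_error(p):
        pass

    try:
        yacc.yacc(write_tables=False)  # keep parsetab.py from being written
    except yacc.YaccError as exc:
        print("parser build failed:", exc)
]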
- def test_yacc_badprec(self): - try: - run_import("yacc_badprec") - except ply.yacc.YaccError: - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "precedence must be a list or tuple\n" - )) - def test_yacc_badprec2(self): - self.assertRaises(ply.yacc.YaccError,run_import,"yacc_badprec2") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "Bad precedence table\n" - )) - - def test_yacc_badprec3(self): - run_import("yacc_badprec3") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "Precedence already specified for terminal 'MINUS'\n" - "Generating LALR tables\n" - - )) - - def test_yacc_badrule(self): - self.assertRaises(ply.yacc.YaccError,run_import,"yacc_badrule") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "yacc_badrule.py:24: Syntax error. Expected ':'\n" - "yacc_badrule.py:28: Syntax error in rule 'statement'\n" - "yacc_badrule.py:33: Syntax error. Expected ':'\n" - "yacc_badrule.py:42: Syntax error. Expected ':'\n" - )) - - def test_yacc_badtok(self): - try: - run_import("yacc_badtok") - except ply.yacc.YaccError: - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "tokens must be a list or tuple\n")) - - def test_yacc_dup(self): - run_import("yacc_dup") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "yacc_dup.py:27: Function p_statement redefined. Previously defined on line 23\n" - "Token 'EQUALS' defined, but not used\n" - "There is 1 unused token\n" - "Generating LALR tables\n" - - )) - def test_yacc_error1(self): - try: - run_import("yacc_error1") - except ply.yacc.YaccError: - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "yacc_error1.py:61: p_error() requires 1 argument\n")) - - def test_yacc_error2(self): - try: - run_import("yacc_error2") - except ply.yacc.YaccError: - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "yacc_error2.py:61: p_error() requires 1 argument\n")) - - def test_yacc_error3(self): - try: - run_import("yacc_error3") - except ply.yacc.YaccError: - e = sys.exc_info()[1] - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "'p_error' defined, but is not a function or method\n")) - - def test_yacc_error4(self): - self.assertRaises(ply.yacc.YaccError,run_import,"yacc_error4") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "yacc_error4.py:62: Illegal rule name 'error'. 
Already defined as a token\n" - )) - - - def test_yacc_error5(self): - run_import("yacc_error5") - result = sys.stdout.getvalue() - self.assert_(check_expected(result, - "Group at 3:10 to 3:12\n" - "Undefined name 'a'\n" - "Syntax error at 'b'\n" - "Syntax error at 4:18 to 4:22\n" - "Assignment Error at 2:5 to 5:27\n" - "13\n" - )) - - def test_yacc_error6(self): - run_import("yacc_error6") - result = sys.stdout.getvalue() - self.assert_(check_expected(result, - "a=7\n" - "Line 3: Syntax error at '*'\n" - "c=21\n" - )) - - def test_yacc_error7(self): - run_import("yacc_error7") - result = sys.stdout.getvalue() - self.assert_(check_expected(result, - "a=7\n" - "Line 3: Syntax error at '*'\n" - "c=21\n" - )) - - def test_yacc_inf(self): - self.assertRaises(ply.yacc.YaccError,run_import,"yacc_inf") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "Token 'NUMBER' defined, but not used\n" - "There is 1 unused token\n" - "Infinite recursion detected for symbol 'statement'\n" - "Infinite recursion detected for symbol 'expression'\n" - )) - def test_yacc_literal(self): - self.assertRaises(ply.yacc.YaccError,run_import,"yacc_literal") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "yacc_literal.py:36: Literal token '**' in rule 'expression' may only be a single character\n" - )) - def test_yacc_misplaced(self): - self.assertRaises(ply.yacc.YaccError,run_import,"yacc_misplaced") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "yacc_misplaced.py:32: Misplaced '|'\n" - )) - - def test_yacc_missing1(self): - self.assertRaises(ply.yacc.YaccError,run_import,"yacc_missing1") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "yacc_missing1.py:24: Symbol 'location' used, but not defined as a token or a rule\n" - )) - - def test_yacc_nested(self): - run_import("yacc_nested") - result = sys.stdout.getvalue() - self.assert_(check_expected(result, - "A\n" - "A\n" - "A\n", - )) - - def test_yacc_nodoc(self): - run_import("yacc_nodoc") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "yacc_nodoc.py:27: No documentation string specified in function 'p_statement_expr' (ignored)\n" - "Generating LALR tables\n" - )) - - def test_yacc_noerror(self): - run_import("yacc_noerror") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "no p_error() function is defined\n" - "Generating LALR tables\n" - )) - - def test_yacc_nop(self): - run_import("yacc_nop") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "yacc_nop.py:27: Possible grammar rule 'statement_expr' defined without p_ prefix\n" - "Generating LALR tables\n" - )) - - def test_yacc_notfunc(self): - run_import("yacc_notfunc") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "'p_statement_assign' not defined as a function\n" - "Token 'EQUALS' defined, but not used\n" - "There is 1 unused token\n" - "Generating LALR tables\n" - )) - def test_yacc_notok(self): - try: - run_import("yacc_notok") - except ply.yacc.YaccError: - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "No token list is defined\n")) - - def test_yacc_rr(self): - run_import("yacc_rr") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "Generating LALR tables\n" - "1 reduce/reduce conflict\n" - "reduce/reduce conflict in state 15 resolved using rule (statement -> NAME EQUALS NUMBER)\n" - "rejected rule (expression -> NUMBER) in state 15\n" - - )) - - def 
test_yacc_rr_unused(self): - run_import("yacc_rr_unused") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "no p_error() function is defined\n" - "Generating LALR tables\n" - "3 reduce/reduce conflicts\n" - "reduce/reduce conflict in state 1 resolved using rule (rule3 -> A)\n" - "rejected rule (rule4 -> A) in state 1\n" - "reduce/reduce conflict in state 1 resolved using rule (rule3 -> A)\n" - "rejected rule (rule5 -> A) in state 1\n" - "reduce/reduce conflict in state 1 resolved using rule (rule4 -> A)\n" - "rejected rule (rule5 -> A) in state 1\n" - "Rule (rule5 -> A) is never reduced\n" - )) - - def test_yacc_simple(self): - run_import("yacc_simple") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "Generating LALR tables\n" - )) - - def test_yacc_sr(self): - run_import("yacc_sr") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "Generating LALR tables\n" - "20 shift/reduce conflicts\n" - )) - - def test_yacc_term1(self): - self.assertRaises(ply.yacc.YaccError,run_import,"yacc_term1") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "yacc_term1.py:24: Illegal rule name 'NUMBER'. Already defined as a token\n" - )) - - def test_yacc_unicode_literals(self): - run_import("yacc_unicode_literals") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "Generating LALR tables\n" - )) - - def test_yacc_unused(self): - self.assertRaises(ply.yacc.YaccError,run_import,"yacc_unused") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "yacc_unused.py:62: Symbol 'COMMA' used, but not defined as a token or a rule\n" - "Symbol 'COMMA' is unreachable\n" - "Symbol 'exprlist' is unreachable\n" - )) - def test_yacc_unused_rule(self): - run_import("yacc_unused_rule") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "yacc_unused_rule.py:62: Rule 'integer' defined, but not used\n" - "There is 1 unused rule\n" - "Symbol 'integer' is unreachable\n" - "Generating LALR tables\n" - )) - - def test_yacc_uprec(self): - self.assertRaises(ply.yacc.YaccError,run_import,"yacc_uprec") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "yacc_uprec.py:37: Nothing known about the precedence of 'UMINUS'\n" - )) - - def test_yacc_uprec2(self): - self.assertRaises(ply.yacc.YaccError,run_import,"yacc_uprec2") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "yacc_uprec2.py:37: Syntax error. 
Nothing follows %prec\n" - )) - - def test_yacc_prec1(self): - self.assertRaises(ply.yacc.YaccError,run_import,"yacc_prec1") - result = sys.stderr.getvalue() - self.assert_(check_expected(result, - "Precedence rule 'left' defined for unknown symbol '+'\n" - "Precedence rule 'left' defined for unknown symbol '*'\n" - "Precedence rule 'left' defined for unknown symbol '-'\n" - "Precedence rule 'left' defined for unknown symbol '/'\n" - )) - - def test_pkg_test1(self): - from pkg_test1 import parser - self.assertTrue(os.path.exists('pkg_test1/parsing/parsetab.py')) - self.assertTrue(os.path.exists('pkg_test1/parsing/lextab.py')) - self.assertTrue(os.path.exists('pkg_test1/parsing/parser.out')) - r = parser.parse('3+4+5') - self.assertEqual(r, 12) - - def test_pkg_test2(self): - from pkg_test2 import parser - self.assertTrue(os.path.exists('pkg_test2/parsing/calcparsetab.py')) - self.assertTrue(os.path.exists('pkg_test2/parsing/calclextab.py')) - self.assertTrue(os.path.exists('pkg_test2/parsing/parser.out')) - r = parser.parse('3+4+5') - self.assertEqual(r, 12) - - def test_pkg_test3(self): - from pkg_test3 import parser - self.assertTrue(os.path.exists('pkg_test3/generated/parsetab.py')) - self.assertTrue(os.path.exists('pkg_test3/generated/lextab.py')) - self.assertTrue(os.path.exists('pkg_test3/generated/parser.out')) - r = parser.parse('3+4+5') - self.assertEqual(r, 12) - - def test_pkg_test4(self): - from pkg_test4 import parser - self.assertFalse(os.path.exists('pkg_test4/parsing/parsetab.py')) - self.assertFalse(os.path.exists('pkg_test4/parsing/lextab.py')) - self.assertFalse(os.path.exists('pkg_test4/parsing/parser.out')) - r = parser.parse('3+4+5') - self.assertEqual(r, 12) - - def test_pkg_test5(self): - from pkg_test5 import parser - self.assertTrue(os.path.exists('pkg_test5/parsing/parsetab.py')) - self.assertTrue(os.path.exists('pkg_test5/parsing/lextab.py')) - self.assertTrue(os.path.exists('pkg_test5/parsing/parser.out')) - r = parser.parse('3+4+5') - self.assertEqual(r, 12) - - def test_pkg_test6(self): - from pkg_test6 import parser - self.assertTrue(os.path.exists('pkg_test6/parsing/parsetab.py')) - self.assertTrue(os.path.exists('pkg_test6/parsing/lextab.py')) - self.assertTrue(os.path.exists('pkg_test6/parsing/parser.out')) - r = parser.parse('3+4+5') - self.assertEqual(r, 12) - -unittest.main() diff --git a/xonsh/ply/test/yacc_badargs.py b/xonsh/ply/test/yacc_badargs.py deleted file mode 100644 index 9a1d03f..0000000 --- a/xonsh/ply/test/yacc_badargs.py +++ /dev/null @@ -1,68 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_badargs.py -# -# Rules with wrong # args -# ----------------------------------------------------------------------------- -import sys -sys.tracebacklimit = 0 -sys.path.insert(0,"..") -import ply.yacc as yacc - -from calclex import tokens - -# Parsing rules -precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -def p_statement_assign(t,s): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_expr(): - 'statement : expression' - print(t[1]) - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 
'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_error(t): - print("Syntax error at '%s'" % t.value) - -yacc.yacc() - - - - diff --git a/xonsh/ply/test/yacc_badid.py b/xonsh/ply/test/yacc_badid.py deleted file mode 100644 index e4b9f5e..0000000 --- a/xonsh/ply/test/yacc_badid.py +++ /dev/null @@ -1,77 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_badid.py -# -# Attempt to define a rule with a bad-identifier name -# ----------------------------------------------------------------------------- -import sys - -if ".." not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -from calclex import tokens - -# Parsing rules -precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_expr(t): - 'statement : expression' - print(t[1]) - -def p_statement_expr2(t): - 'statement : bad&rule' - pass - -def p_badrule(t): - 'bad&rule : expression' - pass - - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_error(t): - pass - -yacc.yacc() - - - - diff --git a/xonsh/ply/test/yacc_badprec.py b/xonsh/ply/test/yacc_badprec.py deleted file mode 100644 index 3013bb6..0000000 --- a/xonsh/ply/test/yacc_badprec.py +++ /dev/null @@ -1,64 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_badprec.py -# -# Bad precedence specifier -# ----------------------------------------------------------------------------- -import sys - -if ".." 
not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -from calclex import tokens - -# Parsing rules -precedence = "blah" - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_expr(t): - 'statement : expression' - print(t[1]) - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_error(t): - print("Syntax error at '%s'" % t.value) - -yacc.yacc() - - - - diff --git a/xonsh/ply/test/yacc_badprec2.py b/xonsh/ply/test/yacc_badprec2.py deleted file mode 100644 index 83093b4..0000000 --- a/xonsh/ply/test/yacc_badprec2.py +++ /dev/null @@ -1,68 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_badprec2.py -# -# Bad precedence -# ----------------------------------------------------------------------------- -import sys - -if ".." not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -from calclex import tokens - -# Parsing rules -precedence = ( - 42, - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_expr(t): - 'statement : expression' - print(t[1]) - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_error(t): - print("Syntax error at '%s'" % t.value) - -yacc.yacc() - - - - diff --git a/xonsh/ply/test/yacc_badprec3.py b/xonsh/ply/test/yacc_badprec3.py deleted file mode 100644 index d925ecd..0000000 --- a/xonsh/ply/test/yacc_badprec3.py +++ /dev/null @@ -1,68 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_badprec3.py -# -# Bad precedence -# ----------------------------------------------------------------------------- -import sys - -if ".." 
not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -from calclex import tokens - -# Parsing rules -precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE','MINUS'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_expr(t): - 'statement : expression' - print(t[1]) - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[3] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_error(t): - print("Syntax error at '%s'" % t.value) - -yacc.yacc() - - - - diff --git a/xonsh/ply/test/yacc_badrule.py b/xonsh/ply/test/yacc_badrule.py deleted file mode 100644 index 92af646..0000000 --- a/xonsh/ply/test/yacc_badrule.py +++ /dev/null @@ -1,68 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_badrule.py -# -# Syntax problems in the rule strings -# ----------------------------------------------------------------------------- -import sys - -if ".." not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -from calclex import tokens - -# Parsing rules -precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'statement NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_expr(t): - 'statement' - print(t[1]) - -def p_expression_binop(t): - '''expression : expression PLUS expression - expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression: MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_error(t): - print("Syntax error at '%s'" % t.value) - -yacc.yacc() - - - - diff --git a/xonsh/ply/test/yacc_badtok.py b/xonsh/ply/test/yacc_badtok.py deleted file mode 100644 index fc4afe1..0000000 --- a/xonsh/ply/test/yacc_badtok.py +++ /dev/null @@ -1,68 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_badtok.py -# -# A grammar, but tokens is a bad datatype -# ----------------------------------------------------------------------------- - -import sys -if ".." 
not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -tokens = "Hello" - -# Parsing rules -precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_expr(t): - 'statement : expression' - print(t[1]) - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_error(t): - print("Syntax error at '%s'" % t.value) - -yacc.yacc() - - - - diff --git a/xonsh/ply/test/yacc_dup.py b/xonsh/ply/test/yacc_dup.py deleted file mode 100644 index 309ba32..0000000 --- a/xonsh/ply/test/yacc_dup.py +++ /dev/null @@ -1,68 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_dup.py -# -# Duplicated rule name -# ----------------------------------------------------------------------------- -import sys - -if ".." not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -from calclex import tokens - -# Parsing rules -precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -def p_statement(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement(t): - 'statement : expression' - print(t[1]) - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_error(t): - print("Syntax error at '%s'" % t.value) - -yacc.yacc() - - - - diff --git a/xonsh/ply/test/yacc_error1.py b/xonsh/ply/test/yacc_error1.py deleted file mode 100644 index 10ac6a9..0000000 --- a/xonsh/ply/test/yacc_error1.py +++ /dev/null @@ -1,68 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_error1.py -# -# Bad p_error() function -# ----------------------------------------------------------------------------- -import sys - -if ".." 
not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -from calclex import tokens - -# Parsing rules -precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_expr(t): - 'statement : expression' - print(t[1]) - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_error(t,s): - print("Syntax error at '%s'" % t.value) - -yacc.yacc() - - - - diff --git a/xonsh/ply/test/yacc_error2.py b/xonsh/ply/test/yacc_error2.py deleted file mode 100644 index 7591418..0000000 --- a/xonsh/ply/test/yacc_error2.py +++ /dev/null @@ -1,68 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_error2.py -# -# Bad p_error() function -# ----------------------------------------------------------------------------- -import sys - -if ".." not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -from calclex import tokens - -# Parsing rules -precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_expr(t): - 'statement : expression' - print(t[1]) - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_error(): - print("Syntax error at '%s'" % t.value) - -yacc.yacc() - - - - diff --git a/xonsh/ply/test/yacc_error3.py b/xonsh/ply/test/yacc_error3.py deleted file mode 100644 index 4604a48..0000000 --- a/xonsh/ply/test/yacc_error3.py +++ /dev/null @@ -1,67 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_error3.py -# -# Bad p_error() function -# ----------------------------------------------------------------------------- -import sys - -if ".." 
not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -from calclex import tokens - -# Parsing rules -precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_expr(t): - 'statement : expression' - print(t[1]) - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -p_error = "blah" - -yacc.yacc() - - - - diff --git a/xonsh/ply/test/yacc_error4.py b/xonsh/ply/test/yacc_error4.py deleted file mode 100644 index 9c550cd..0000000 --- a/xonsh/ply/test/yacc_error4.py +++ /dev/null @@ -1,72 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_error4.py -# -# Attempt to define a rule named 'error' -# ----------------------------------------------------------------------------- -import sys - -if ".." not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -from calclex import tokens - -# Parsing rules -precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_expr(t): - 'statement : expression' - print(t[1]) - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_error_handler(t): - 'error : NAME' - pass - -def p_error(t): - pass - -yacc.yacc() - - - - diff --git a/xonsh/ply/test/yacc_error5.py b/xonsh/ply/test/yacc_error5.py deleted file mode 100644 index 9eb0f85..0000000 --- a/xonsh/ply/test/yacc_error5.py +++ /dev/null @@ -1,94 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_error5.py -# -# Lineno and position tracking with error tokens -# ----------------------------------------------------------------------------- -import sys - -if ".." 
not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -from calclex import tokens - -# Parsing rules -precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_assign_error(t): - 'statement : NAME EQUALS error' - line_start, line_end = t.linespan(3) - pos_start, pos_end = t.lexspan(3) - print("Assignment Error at %d:%d to %d:%d" % (line_start,pos_start,line_end,pos_end)) - -def p_statement_expr(t): - 'statement : expression' - print(t[1]) - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - line_start, line_end = t.linespan(2) - pos_start, pos_end = t.lexspan(2) - print("Group at %d:%d to %d:%d" % (line_start,pos_start, line_end, pos_end)) - t[0] = t[2] - -def p_expression_group_error(t): - 'expression : LPAREN error RPAREN' - line_start, line_end = t.linespan(2) - pos_start, pos_end = t.lexspan(2) - print("Syntax error at %d:%d to %d:%d" % (line_start,pos_start, line_end, pos_end)) - t[0] = 0 - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_error(t): - print("Syntax error at '%s'" % t.value) - -parser = yacc.yacc() -import calclex -calclex.lexer.lineno=1 -parser.parse(""" -a = 3 + -(4*5) + -(a b c) + -+ 6 + 7 -""", tracking=True) - - - - - - diff --git a/xonsh/ply/test/yacc_error6.py b/xonsh/ply/test/yacc_error6.py deleted file mode 100644 index 8d0ec85..0000000 --- a/xonsh/ply/test/yacc_error6.py +++ /dev/null @@ -1,80 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_error6.py -# -# Panic mode recovery test -# ----------------------------------------------------------------------------- -import sys - -if ".." 
not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -from calclex import tokens - -# Parsing rules -precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - -def p_statements(t): - 'statements : statements statement' - pass - -def p_statements_1(t): - 'statements : statement' - pass - -def p_statement_assign(p): - 'statement : LPAREN NAME EQUALS expression RPAREN' - print("%s=%s" % (p[2],p[4])) - -def p_statement_expr(t): - 'statement : LPAREN expression RPAREN' - print(t[1]) - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_error(p): - if p: - print("Line %d: Syntax error at '%s'" % (p.lineno, p.value)) - # Scan ahead looking for a name token - while True: - tok = parser.token() - if not tok or tok.type == 'RPAREN': - break - if tok: - parser.restart() - return None - -parser = yacc.yacc() -import calclex -calclex.lexer.lineno=1 - -parser.parse(""" -(a = 3 + 4) -(b = 4 + * 5 - 6 + *) -(c = 10 + 11) -""") - - - - - - diff --git a/xonsh/ply/test/yacc_error7.py b/xonsh/ply/test/yacc_error7.py deleted file mode 100644 index fb131be..0000000 --- a/xonsh/ply/test/yacc_error7.py +++ /dev/null @@ -1,80 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_error7.py -# -# Panic mode recovery test using deprecated functionality -# ----------------------------------------------------------------------------- -import sys - -if ".." 
not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -from calclex import tokens - -# Parsing rules -precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - -def p_statements(t): - 'statements : statements statement' - pass - -def p_statements_1(t): - 'statements : statement' - pass - -def p_statement_assign(p): - 'statement : LPAREN NAME EQUALS expression RPAREN' - print("%s=%s" % (p[2],p[4])) - -def p_statement_expr(t): - 'statement : LPAREN expression RPAREN' - print(t[1]) - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_error(p): - if p: - print("Line %d: Syntax error at '%s'" % (p.lineno, p.value)) - # Scan ahead looking for a name token - while True: - tok = yacc.token() - if not tok or tok.type == 'RPAREN': - break - if tok: - yacc.restart() - return None - -parser = yacc.yacc() -import calclex -calclex.lexer.lineno=1 - -parser.parse(""" -(a = 3 + 4) -(b = 4 + * 5 - 6 + *) -(c = 10 + 11) -""") - - - - - - diff --git a/xonsh/ply/test/yacc_inf.py b/xonsh/ply/test/yacc_inf.py deleted file mode 100644 index efd3612..0000000 --- a/xonsh/ply/test/yacc_inf.py +++ /dev/null @@ -1,56 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_inf.py -# -# Infinite recursion -# ----------------------------------------------------------------------------- -import sys - -if ".." not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -from calclex import tokens - -# Parsing rules -precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_expr(t): - 'statement : expression' - print(t[1]) - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_error(t): - print("Syntax error at '%s'" % t.value) - -yacc.yacc() - - - - diff --git a/xonsh/ply/test/yacc_literal.py b/xonsh/ply/test/yacc_literal.py deleted file mode 100644 index 0d62803..0000000 --- a/xonsh/ply/test/yacc_literal.py +++ /dev/null @@ -1,69 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_literal.py -# -# Grammar with bad literal characters -# ----------------------------------------------------------------------------- -import sys - -if ".." 
not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -from calclex import tokens - -# Parsing rules -precedence = ( - ('left','+','-'), - ('left','*','/'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_expr(t): - 'statement : expression' - print(t[1]) - -def p_expression_binop(t): - '''expression : expression '+' expression - | expression '-' expression - | expression '*' expression - | expression '/' expression - | expression '**' expression ''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_error(t): - print("Syntax error at '%s'" % t.value) - -yacc.yacc() - - - - diff --git a/xonsh/ply/test/yacc_misplaced.py b/xonsh/ply/test/yacc_misplaced.py deleted file mode 100644 index 9159b01..0000000 --- a/xonsh/ply/test/yacc_misplaced.py +++ /dev/null @@ -1,68 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_misplaced.py -# -# A misplaced | in grammar rules -# ----------------------------------------------------------------------------- -import sys - -if ".." not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -from calclex import tokens - -# Parsing rules -precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_expr(t): - 'statement : expression' - print(t[1]) - -def p_expression_binop(t): - ''' | expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_error(t): - print("Syntax error at '%s'" % t.value) - -yacc.yacc() - - - - diff --git a/xonsh/ply/test/yacc_missing1.py b/xonsh/ply/test/yacc_missing1.py deleted file mode 100644 index d1b5105..0000000 --- a/xonsh/ply/test/yacc_missing1.py +++ /dev/null @@ -1,68 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_missing1.py -# -# Grammar with a missing rule -# ----------------------------------------------------------------------------- -import sys - -if ".." 
not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -from calclex import tokens - -# Parsing rules -precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'statement : location EQUALS expression' - names[t[1]] = t[3] - -def p_statement_expr(t): - 'statement : expression' - print(t[1]) - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_error(t): - print("Syntax error at '%s'" % t.value) - -yacc.yacc() - - - - diff --git a/xonsh/ply/test/yacc_nested.py b/xonsh/ply/test/yacc_nested.py deleted file mode 100644 index a1b061e..0000000 --- a/xonsh/ply/test/yacc_nested.py +++ /dev/null @@ -1,33 +0,0 @@ -import sys - -if ".." not in sys.path: sys.path.insert(0,"..") - -from ply import lex, yacc - -t_A = 'A' -t_B = 'B' -t_C = 'C' - -tokens = ('A', 'B', 'C') - -the_lexer = lex.lex() - -def t_error(t): - pass - -def p_error(p): - pass - -def p_start(t): - '''start : A nest C''' - pass - -def p_nest(t): - '''nest : B''' - print(t[-1]) - -the_parser = yacc.yacc(debug = False, write_tables = False) - -the_parser.parse('ABC', the_lexer) -the_parser.parse('ABC', the_lexer, tracking=True) -the_parser.parse('ABC', the_lexer, tracking=True, debug=1) diff --git a/xonsh/ply/test/yacc_nodoc.py b/xonsh/ply/test/yacc_nodoc.py deleted file mode 100644 index 0f61920..0000000 --- a/xonsh/ply/test/yacc_nodoc.py +++ /dev/null @@ -1,67 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_nodoc.py -# -# Rule with a missing doc-string -# ----------------------------------------------------------------------------- -import sys - -if ".." 
not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -from calclex import tokens - -# Parsing rules -precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_expr(t): - print(t[1]) - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_error(t): - print("Syntax error at '%s'" % t.value) - -yacc.yacc() - - - - diff --git a/xonsh/ply/test/yacc_noerror.py b/xonsh/ply/test/yacc_noerror.py deleted file mode 100644 index b38c758..0000000 --- a/xonsh/ply/test/yacc_noerror.py +++ /dev/null @@ -1,66 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_noerror.py -# -# No p_error() rule defined. -# ----------------------------------------------------------------------------- -import sys - -if ".." not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -from calclex import tokens - -# Parsing rules -precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_expr(t): - 'statement : expression' - print(t[1]) - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - - -yacc.yacc() - - - - diff --git a/xonsh/ply/test/yacc_nop.py b/xonsh/ply/test/yacc_nop.py deleted file mode 100644 index 789a9cf..0000000 --- a/xonsh/ply/test/yacc_nop.py +++ /dev/null @@ -1,68 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_nop.py -# -# Possible grammar rule defined without p_ prefix -# ----------------------------------------------------------------------------- -import sys - -if ".." 
not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -from calclex import tokens - -# Parsing rules -precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def statement_expr(t): - 'statement : expression' - print(t[1]) - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_error(t): - print("Syntax error at '%s'" % t.value) - -yacc.yacc() - - - - diff --git a/xonsh/ply/test/yacc_notfunc.py b/xonsh/ply/test/yacc_notfunc.py deleted file mode 100644 index 5093a74..0000000 --- a/xonsh/ply/test/yacc_notfunc.py +++ /dev/null @@ -1,66 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_notfunc.py -# -# p_rule not defined as a function -# ----------------------------------------------------------------------------- -import sys - -if ".." not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -from calclex import tokens - -# Parsing rules -precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -p_statement_assign = "Blah" - -def p_statement_expr(t): - 'statement : expression' - print(t[1]) - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_error(t): - print("Syntax error at '%s'" % t.value) - -yacc.yacc() - - - - diff --git a/xonsh/ply/test/yacc_notok.py b/xonsh/ply/test/yacc_notok.py deleted file mode 100644 index cff55a8..0000000 --- a/xonsh/ply/test/yacc_notok.py +++ /dev/null @@ -1,67 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_notok.py -# -# A grammar, but we forgot to import the tokens list -# ----------------------------------------------------------------------------- - -import sys - -if ".." 
not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -# Parsing rules -precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_expr(t): - 'statement : expression' - print(t[1]) - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_error(t): - print("Syntax error at '%s'" % t.value) - -yacc.yacc() - - - - diff --git a/xonsh/ply/test/yacc_prec1.py b/xonsh/ply/test/yacc_prec1.py deleted file mode 100644 index 99fcd90..0000000 --- a/xonsh/ply/test/yacc_prec1.py +++ /dev/null @@ -1,68 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_prec1.py -# -# Tests case where precedence specifier doesn't match up to terminals -# ----------------------------------------------------------------------------- -import sys - -if ".." not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -from calclex import tokens - -# Parsing rules -precedence = ( - ('left', '+', '-'), - ('left', '*', '/'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_expr(t): - 'statement : expression' - print(t[1]) - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_error(t): - print("Syntax error at '%s'" % t.value) - -yacc.yacc() - - - - diff --git a/xonsh/ply/test/yacc_rr.py b/xonsh/ply/test/yacc_rr.py deleted file mode 100644 index e7336c2..0000000 --- a/xonsh/ply/test/yacc_rr.py +++ /dev/null @@ -1,72 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_rr.py -# -# A grammar with a reduce/reduce conflict -# ----------------------------------------------------------------------------- -import sys - -if ".." 
not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -from calclex import tokens - -# Parsing rules -precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_assign_2(t): - 'statement : NAME EQUALS NUMBER' - names[t[1]] = t[3] - -def p_statement_expr(t): - 'statement : expression' - print(t[1]) - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_error(t): - print("Syntax error at '%s'" % t.value) - -yacc.yacc() - - - - diff --git a/xonsh/ply/test/yacc_rr_unused.py b/xonsh/ply/test/yacc_rr_unused.py deleted file mode 100644 index 1ca5f7e..0000000 --- a/xonsh/ply/test/yacc_rr_unused.py +++ /dev/null @@ -1,30 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_rr_unused.py -# -# A grammar with reduce/reduce conflicts and a rule that never -# gets reduced. -# ----------------------------------------------------------------------------- -import sys - -if ".." not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -tokens = ('A', 'B', 'C') - -def p_grammar(p): - ''' - rule1 : rule2 B - | rule2 C - - rule2 : rule3 B - | rule4 - | rule5 - - rule3 : A - - rule4 : A - - rule5 : A - ''' - -yacc.yacc() diff --git a/xonsh/ply/test/yacc_simple.py b/xonsh/ply/test/yacc_simple.py deleted file mode 100644 index bd989f4..0000000 --- a/xonsh/ply/test/yacc_simple.py +++ /dev/null @@ -1,68 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_simple.py -# -# A simple, properly specifier grammar -# ----------------------------------------------------------------------------- -import sys - -if ".." 
not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -from calclex import tokens - -# Parsing rules -precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_expr(t): - 'statement : expression' - print(t[1]) - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_error(t): - print("Syntax error at '%s'" % t.value) - -yacc.yacc() - - - - diff --git a/xonsh/ply/test/yacc_sr.py b/xonsh/ply/test/yacc_sr.py deleted file mode 100644 index 69a1e9c..0000000 --- a/xonsh/ply/test/yacc_sr.py +++ /dev/null @@ -1,63 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_sr.py -# -# A grammar with shift-reduce conflicts -# ----------------------------------------------------------------------------- -import sys - -if ".." not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -from calclex import tokens - -# Parsing rules - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_expr(t): - 'statement : expression' - print(t[1]) - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_error(t): - print("Syntax error at '%s'" % t.value) - -yacc.yacc() - - - - diff --git a/xonsh/ply/test/yacc_term1.py b/xonsh/ply/test/yacc_term1.py deleted file mode 100644 index eaa36e9..0000000 --- a/xonsh/ply/test/yacc_term1.py +++ /dev/null @@ -1,68 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_term1.py -# -# Terminal used on the left-hand-side -# ----------------------------------------------------------------------------- -import sys - -if ".." 
not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -from calclex import tokens - -# Parsing rules -precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'NUMBER : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_expr(t): - 'statement : expression' - print(t[1]) - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_error(t): - print("Syntax error at '%s'" % t.value) - -yacc.yacc() - - - - diff --git a/xonsh/ply/test/yacc_unicode_literals.py b/xonsh/ply/test/yacc_unicode_literals.py deleted file mode 100644 index 5ae4f5b..0000000 --- a/xonsh/ply/test/yacc_unicode_literals.py +++ /dev/null @@ -1,70 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_unicode_literals -# -# Test for unicode literals on Python 2.x -# ----------------------------------------------------------------------------- -from __future__ import unicode_literals - -import sys - -if ".." not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -from calclex import tokens - -# Parsing rules -precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_expr(t): - 'statement : expression' - print(t[1]) - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_error(t): - print("Syntax error at '%s'" % t.value) - -yacc.yacc() - - - - diff --git a/xonsh/ply/test/yacc_unused.py b/xonsh/ply/test/yacc_unused.py deleted file mode 100644 index 55b677b..0000000 --- a/xonsh/ply/test/yacc_unused.py +++ /dev/null @@ -1,77 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_unused.py -# -# A grammar with an unused rule -# ----------------------------------------------------------------------------- -import sys - -if ".." 
not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -from calclex import tokens - -# Parsing rules -precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_expr(t): - 'statement : expression' - print(t[1]) - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_expr_list(t): - 'exprlist : exprlist COMMA expression' - pass - -def p_expr_list_2(t): - 'exprlist : expression' - pass - - -def p_error(t): - print("Syntax error at '%s'" % t.value) - -yacc.yacc() - - - - diff --git a/xonsh/ply/test/yacc_unused_rule.py b/xonsh/ply/test/yacc_unused_rule.py deleted file mode 100644 index 4868ef8..0000000 --- a/xonsh/ply/test/yacc_unused_rule.py +++ /dev/null @@ -1,72 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_unused_rule.py -# -# Grammar with an unused rule -# ----------------------------------------------------------------------------- -import sys - -if ".." not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -from calclex import tokens - -# Parsing rules -precedence = ( - ('left','PLUS','MINUS'), - ('left','TIMES','DIVIDE'), - ('right','UMINUS'), - ) - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_expr(t): - 'statement : expression' - print(t[1]) - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_integer(t): - 'integer : NUMBER' - t[0] = t[1] - -def p_error(t): - print("Syntax error at '%s'" % t.value) - -yacc.yacc() - - - - diff --git a/xonsh/ply/test/yacc_uprec.py b/xonsh/ply/test/yacc_uprec.py deleted file mode 100644 index 569adb8..0000000 --- a/xonsh/ply/test/yacc_uprec.py +++ /dev/null @@ -1,63 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_uprec.py -# -# A grammar with a bad %prec specifier -# ----------------------------------------------------------------------------- -import sys - -if ".." 
not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -from calclex import tokens - -# Parsing rules - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_expr(t): - 'statement : expression' - print(t[1]) - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec UMINUS' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_error(t): - print("Syntax error at '%s'" % t.value) - -yacc.yacc() - - - - diff --git a/xonsh/ply/test/yacc_uprec2.py b/xonsh/ply/test/yacc_uprec2.py deleted file mode 100644 index 73274bf..0000000 --- a/xonsh/ply/test/yacc_uprec2.py +++ /dev/null @@ -1,63 +0,0 @@ -# ----------------------------------------------------------------------------- -# yacc_uprec2.py -# -# A grammar with a bad %prec specifier -# ----------------------------------------------------------------------------- -import sys - -if ".." not in sys.path: sys.path.insert(0,"..") -import ply.yacc as yacc - -from calclex import tokens - -# Parsing rules - -# dictionary of names -names = { } - -def p_statement_assign(t): - 'statement : NAME EQUALS expression' - names[t[1]] = t[3] - -def p_statement_expr(t): - 'statement : expression' - print(t[1]) - -def p_expression_binop(t): - '''expression : expression PLUS expression - | expression MINUS expression - | expression TIMES expression - | expression DIVIDE expression''' - if t[2] == '+' : t[0] = t[1] + t[3] - elif t[2] == '-': t[0] = t[1] - t[3] - elif t[2] == '*': t[0] = t[1] * t[3] - elif t[2] == '/': t[0] = t[1] / t[3] - -def p_expression_uminus(t): - 'expression : MINUS expression %prec' - t[0] = -t[2] - -def p_expression_group(t): - 'expression : LPAREN expression RPAREN' - t[0] = t[2] - -def p_expression_number(t): - 'expression : NUMBER' - t[0] = t[1] - -def p_expression_name(t): - 'expression : NAME' - try: - t[0] = names[t[1]] - except LookupError: - print("Undefined name '%s'" % t[1]) - t[0] = 0 - -def p_error(t): - print("Syntax error at '%s'" % t.value) - -yacc.yacc() - - - - diff --git a/xonsh/pretty.py b/xonsh/pretty.py index f00423c..faae0d2 100644 --- a/xonsh/pretty.py +++ b/xonsh/pretty.py @@ -12,13 +12,14 @@ * Copyright (c) 2001, Janko Hauser * Copyright (c) 2001, Nathaniel Gray + Example Usage ------------- To directly print the representation of an object use `pprint`:: - from pretty import pretty_print - pretty_pprint(complex_object) + from pretty import pprint + pprint(complex_object) To get a string of the output use `pretty`:: @@ -68,37 +69,39 @@ def _repr_pretty_(self, p, cycle): opening bracket of `MyList`. If you just want to indent something you can use the group function -without open / close parameters. You can also use this code:: +without open / close parameters. Yu can also use this code:: with p.indent(2): ... +Inheritance diagram: + +.. 
inheritance-diagram:: IPython.lib.pretty + :parts: 3 :copyright: 2007 by Armin Ronacher. Portions (c) 2009 by Robert Kern. :license: BSD License. """ -import io -import re +from contextlib import contextmanager import sys import types +import re import datetime -import contextlib -import collections +from collections import deque + +# from IPython.utils.py3compat import PY3, cast_unicode +# from IPython.utils.encoding import get_stream_enc -from xonsh.lazyasd import LazyObject, lazyobject +from io import StringIO -__all__ = [ - "pretty", - "pretty_print", - "PrettyPrinter", - "RepresentationPrinter", - "for_type", - "for_type_by_name", -] + +__all__ = ['pretty', 'pprint', 'PrettyPrinter', 'RepresentationPrinter', + 'for_type', 'for_type_by_name'] MAX_SEQ_LENGTH = 1000 +_re_pattern_type = type(re.compile('')) def _safe_getattr(obj, attr, default=None): @@ -112,45 +115,40 @@ def _safe_getattr(obj, attr, default=None): except Exception: return default - -CUnicodeIO = io.StringIO +# if PY3: +CUnicodeIO = StringIO +# else: + # class CUnicodeIO(StringIO): + # """StringIO that casts str to unicode on Python 2""" + # def write(self, text): + # return super(CUnicodeIO, self).write( + # cast_unicode(text, encoding=get_stream_enc(sys.stdout))) -def pretty( - obj, verbose=False, max_width=79, newline="\n", max_seq_length=MAX_SEQ_LENGTH -): +def pretty(obj, verbose=False, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH): """ Pretty print the object's representation. """ - if hasattr(obj, "xonsh_display"): - return obj.xonsh_display() - stream = CUnicodeIO() - printer = RepresentationPrinter( - stream, verbose, max_width, newline, max_seq_length=max_seq_length - ) + printer = RepresentationPrinter(stream, verbose, max_width, newline, max_seq_length) printer.pretty(obj) printer.flush() return stream.getvalue() -def pretty_print( - obj, verbose=False, max_width=79, newline="\n", max_seq_length=MAX_SEQ_LENGTH -): +def pprint(obj, verbose=False, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH): """ - Like pretty() but print to stdout. + Like `pretty` but print to stdout. """ - printer = RepresentationPrinter( - sys.stdout, verbose, max_width, newline, max_seq_length=max_seq_length - ) + printer = RepresentationPrinter(sys.stdout, verbose, max_width, newline, max_seq_length) printer.pretty(obj) printer.flush() sys.stdout.write(newline) sys.stdout.flush() - class _PrettyPrinterBase(object): - @contextlib.contextmanager + + @contextmanager def indent(self, indent): """with statement support for indenting/dedenting.""" self.indentation += indent @@ -159,14 +157,14 @@ def indent(self, indent): finally: self.indentation -= indent - @contextlib.contextmanager - def group(self, indent=0, open="", close=""): + @contextmanager + def group(self, indent=0, gopen='', gclose=''): """like begin_group / end_group but for the with statement.""" - self.begin_group(indent, open) + self.begin_group(indent, gopen) try: yield finally: - self.end_group(indent, close) + self.end_group(indent, gclose) class PrettyPrinter(_PrettyPrinterBase): @@ -177,16 +175,14 @@ class PrettyPrinter(_PrettyPrinterBase): callback method. 
""" - def __init__( - self, output, max_width=79, newline="\n", max_seq_length=MAX_SEQ_LENGTH - ): + def __init__(self, output, max_width=79, newline='\n', max_seq_length=MAX_SEQ_LENGTH): self.output = output self.max_width = max_width self.newline = newline self.max_seq_length = max_seq_length self.output_width = 0 self.buffer_width = 0 - self.buffer = collections.deque() + self.buffer = deque() root_group = Group(0) self.group_stack = [root_group] @@ -222,7 +218,7 @@ def text(self, obj): self.output.write(obj) self.output_width += width - def breakable(self, sep=" "): + def breakable(self, sep=' '): """ Add a breakable separator to the output. This does not mean that it will automatically break here. If no breaking on this position takes @@ -233,7 +229,7 @@ def breakable(self, sep=" "): if group.want_break: self.flush() self.output.write(self.newline) - self.output.write(" " * self.indentation) + self.output.write(' ' * self.indentation) self.output_width = self.indentation self.buffer_width = 0 else: @@ -247,11 +243,11 @@ def break_(self): """ self.flush() self.output.write(self.newline) - self.output.write(" " * self.indentation) + self.output.write(' ' * self.indentation) self.output_width = self.indentation self.buffer_width = 0 - def begin_group(self, indent=0, open=""): + def begin_group(self, indent=0, gopen=''): """ Begin a group. If you want support for python < 2.5 which doesn't has the with statement this is the preferred way: @@ -269,8 +265,8 @@ def begin_group(self, indent=0, open=""): the width of the opening text), the second the opening text. All parameters are optional. """ - if open: - self.text(open) + if gopen: + self.text(gopen) group = Group(self.group_stack[-1].depth + 1) self.group_stack.append(group) self.group_queue.enq(group) @@ -280,20 +276,20 @@ def _enumerate(self, seq): """like enumerate, but with an upper limit on the number of items""" for idx, x in enumerate(seq): if self.max_seq_length and idx >= self.max_seq_length: - self.text(",") + self.text(',') self.breakable() - self.text("...") - return + self.text('...') + raise StopIteration yield idx, x - def end_group(self, dedent=0, close=""): + def end_group(self, dedent=0, gclose=''): """End a group. See `begin_group` for more details.""" self.indentation -= dedent group = self.group_stack.pop() if not group.breakables: self.group_queue.remove(group) - if close: - self.text(close) + if gclose: + self.text(gclose) def flush(self): """Flush data that is left in the buffer.""" @@ -307,7 +303,7 @@ def _get_mro(obj_class): """ Get a reasonable method resolution order of a class and its superclasses for both old-style and new-style classes. """ - if not hasattr(obj_class, "__mro__"): + if not hasattr(obj_class, '__mro__'): # Old-style class. Mix in object to make a fake new-style class. try: obj_class = type(obj_class.__name__, (obj_class, object), {}) @@ -328,7 +324,7 @@ class RepresentationPrinter(PrettyPrinter): printer for a python object. This class stores processing data on `self` so you must *never* use - this class in a threaded environment. Always lock it or reinstantiate + this class in a threaded environment. Always lock it or reinstanciate it. Instances also have a verbose flag callbacks can access to control their @@ -337,21 +333,11 @@ class RepresentationPrinter(PrettyPrinter): verbose mode. 
""" - def __init__( - self, - output, - verbose=False, - max_width=79, - newline="\n", - singleton_pprinters=None, - type_pprinters=None, - deferred_pprinters=None, - max_seq_length=MAX_SEQ_LENGTH, - ): - - PrettyPrinter.__init__( - self, output, max_width, newline, max_seq_length=max_seq_length - ) + def __init__(self, output, verbose=False, max_width=79, newline='\n', + singleton_pprinters=None, type_pprinters=None, deferred_pprinters=None, + max_seq_length=MAX_SEQ_LENGTH): + + PrettyPrinter.__init__(self, output, max_width, newline, max_seq_length=max_seq_length) self.verbose = verbose self.stack = [] if singleton_pprinters is None: @@ -371,7 +357,7 @@ def pretty(self, obj): self.stack.append(obj_id) self.begin_group() try: - obj_class = _safe_getattr(obj, "__class__", None) or type(obj) + obj_class = _safe_getattr(obj, '__class__', None) or type(obj) # First try to find registered singleton printers for the type. try: printer = self.singleton_pprinters[obj_id] @@ -396,7 +382,7 @@ def pretty(self, obj): # Some objects automatically create any requested # attribute. Try to ignore most of them by checking for # callability. - if "_repr_pretty_" in cls.__dict__: + if '_repr_pretty_' in cls.__dict__: meth = cls._repr_pretty_ if callable(meth): return meth(obj, self, cycle) @@ -413,8 +399,8 @@ def _in_deferred_types(self, cls): class is not in the registry. Successful matches will be moved to the regular type registry for future use. """ - mod = _safe_getattr(cls, "__module__", None) - name = _safe_getattr(cls, "__name__", None) + mod = _safe_getattr(cls, '__module__', None) + name = _safe_getattr(cls, '__name__', None) key = (mod, name) printer = None if key in self.deferred_pprinters: @@ -425,11 +411,13 @@ class is not in the registry. Successful matches will be moved to the class Printable(object): + def output(self, stream, output_width): return output_width class Text(Printable): + def __init__(self): self.objs = [] self.width = 0 @@ -445,6 +433,7 @@ def add(self, obj, width): class Breakable(Printable): + def __init__(self, seq, width, pretty): self.obj = seq self.width = width @@ -457,7 +446,7 @@ def output(self, stream, output_width): self.group.breakables.popleft() if self.group.want_break: stream.write(self.pretty.newline) - stream.write(" " * self.indentation) + stream.write(' ' * self.indentation) return self.indentation if not self.group.breakables: self.pretty.group_queue.remove(self.group) @@ -466,13 +455,15 @@ def output(self, stream, output_width): class Group(Printable): + def __init__(self, depth): self.depth = depth - self.breakables = collections.deque() + self.breakables = deque() self.want_break = False class GroupQueue(object): + def __init__(self, *groups): self.queue = [] for group in groups: @@ -501,14 +492,10 @@ def remove(self, group): except ValueError: pass - -@lazyobject -def _baseclass_reprs(): - try: - br = (object.__repr__, types.InstanceType.__repr__) - except AttributeError: # Python 3 - br = (object.__repr__,) - return br +try: + _baseclass_reprs = (object.__repr__, types.InstanceType.__repr__) +except AttributeError: # Python 3 + _baseclass_reprs = (object.__repr__,) def _default_pprint(obj, p, cycle): @@ -516,20 +503,20 @@ def _default_pprint(obj, p, cycle): The default print function. Used if an object does not provide one and it's none of the builtin objects. 
""" - klass = _safe_getattr(obj, "__class__", None) or type(obj) - if _safe_getattr(klass, "__repr__", None) not in _baseclass_reprs: + klass = _safe_getattr(obj, '__class__', None) or type(obj) + if _safe_getattr(klass, '__repr__', None) not in _baseclass_reprs: # A user-provided repr. Find newlines and replace them with p.break_() _repr_pprint(obj, p, cycle) return - p.begin_group(1, "<") + p.begin_group(1, '<') p.pretty(klass) - p.text(" at 0x%x" % id(obj)) + p.text(' at 0x%x' % id(obj)) if cycle: - p.text(" ...") + p.text(' ...') elif p.verbose: first = True for key in dir(obj): - if not key.startswith("_"): + if not key.startswith('_'): try: value = getattr(obj, key) except AttributeError: @@ -537,16 +524,16 @@ def _default_pprint(obj, p, cycle): if isinstance(value, types.MethodType): continue if not first: - p.text(",") + p.text(',') p.breakable() p.text(key) - p.text("=") + p.text('=') step = len(key) + 1 p.indentation += step p.pretty(value) p.indentation -= step first = False - p.end_group(1, ">") + p.end_group(1, '>') def _seq_pprinter_factory(start, end, basetype): @@ -554,31 +541,25 @@ def _seq_pprinter_factory(start, end, basetype): Factory that returns a pprint function useful for sequences. Used by the default pprint for tuples, dicts, and lists. """ - def inner(obj, p, cycle): typ = type(obj) - if ( - basetype is not None - and typ is not basetype - and typ.__repr__ != basetype.__repr__ - ): + if basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__: # If the subclass provides its own repr, use it instead. return p.text(typ.__repr__(obj)) if cycle: - return p.text(start + "..." + end) + return p.text(start + '...' + end) step = len(start) p.begin_group(step, start) for idx, x in p._enumerate(obj): if idx: - p.text(",") + p.text(',') p.breakable() p.pretty(x) if len(obj) == 1 and type(obj) is tuple: # Special case for 1-item tuples. - p.text(",") + p.text(',') p.end_group(step, end) - return inner @@ -586,22 +567,17 @@ def _set_pprinter_factory(start, end, basetype): """ Factory that returns a pprint function useful for sets and frozensets. """ - def inner(obj, p, cycle): typ = type(obj) - if ( - basetype is not None - and typ is not basetype - and typ.__repr__ != basetype.__repr__ - ): + if basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__: # If the subclass provides its own repr, use it instead. return p.text(typ.__repr__(obj)) if cycle: - return p.text(start + "..." + end) + return p.text(start + '...' + end) if len(obj) == 0: # Special case. - p.text(basetype.__name__ + "()") + p.text(basetype.__name__ + '()') else: step = len(start) p.begin_group(step, start) @@ -615,11 +591,10 @@ def inner(obj, p, cycle): pass for idx, x in p._enumerate(items): if idx: - p.text(",") + p.text(',') p.breakable() p.pretty(x) p.end_group(step, end) - return inner @@ -628,19 +603,14 @@ def _dict_pprinter_factory(start, end, basetype=None): Factory that returns a pprint function used by the default pprint of dicts and dict proxies. """ - def inner(obj, p, cycle): typ = type(obj) - if ( - basetype is not None - and typ is not basetype - and typ.__repr__ != basetype.__repr__ - ): + if basetype is not None and typ is not basetype and typ.__repr__ != basetype.__repr__: # If the subclass provides its own repr, use it instead. 
return p.text(typ.__repr__(obj)) if cycle: - return p.text("{...}") + return p.text('{...}') p.begin_group(1, start) keys = obj.keys() # if dict isn't large enough to be truncated, sort keys before displaying @@ -652,57 +622,48 @@ def inner(obj, p, cycle): pass for idx, key in p._enumerate(keys): if idx: - p.text(",") + p.text(',') p.breakable() p.pretty(key) - p.text(": ") + p.text(': ') p.pretty(obj[key]) p.end_group(1, end) - return inner def _super_pprint(obj, p, cycle): """The pprint for the super type.""" - p.begin_group(8, "") + p.end_group(8, '>') def _re_pattern_pprint(obj, p, cycle): """The pprint function for regular expression patterns.""" - p.text("re.compile(") + p.text('re.compile(') pattern = repr(obj.pattern) - if pattern[:1] in "uU": + if pattern[:1] in 'uU': pattern = pattern[1:] - prefix = "ur" + prefix = 'ur' else: - prefix = "r" - pattern = prefix + pattern.replace("\\\\", "\\") + prefix = 'r' + pattern = prefix + pattern.replace('\\\\', '\\') p.text(pattern) if obj.flags: - p.text(",") + p.text(',') p.breakable() done_one = False - for flag in ( - "TEMPLATE", - "IGNORECASE", - "LOCALE", - "MULTILINE", - "DOTALL", - "UNICODE", - "VERBOSE", - "DEBUG", - ): + for flag in ('TEMPLATE', 'IGNORECASE', 'LOCALE', 'MULTILINE', 'DOTALL', + 'UNICODE', 'VERBOSE', 'DEBUG'): if obj.flags & getattr(re, flag): if done_one: - p.text("|") - p.text("re." + flag) + p.text('|') + p.text('re.' + flag) done_one = True - p.text(")") + p.text(')') def _type_pprint(obj, p, cycle): @@ -715,22 +676,13 @@ def _type_pprint(obj, p, cycle): _repr_pprint(obj, p, cycle) return - mod = _safe_getattr(obj, "__module__", None) - try: - name = obj.__qualname__ - if not isinstance(name, str): - # This can happen if the type implements __qualname__ as a property - # or other descriptor in Python 2. - raise Exception("Try __name__") - except Exception: - name = obj.__name__ - if not isinstance(name, str): - name = "" + mod = _safe_getattr(obj, '__module__', None) + name = _safe_getattr(obj, '__qualname__', obj.__name__) - if mod in (None, "__builtin__", "builtins", "exceptions"): + if mod in (None, '__builtin__', 'builtins', 'exceptions'): p.text(name) else: - p.text(mod + "." + name) + p.text(mod + '.' + name) def _repr_pprint(obj, p, cycle): @@ -745,80 +697,76 @@ def _repr_pprint(obj, p, cycle): def _function_pprint(obj, p, cycle): """Base pprint for all functions and builtin functions.""" - name = _safe_getattr(obj, "__qualname__", obj.__name__) + name = _safe_getattr(obj, '__qualname__', obj.__name__) mod = obj.__module__ - if mod and mod not in ("__builtin__", "builtins", "exceptions"): - name = mod + "." + name - p.text("" % name) + if mod and mod not in ('__builtin__', 'builtins', 'exceptions'): + name = mod + '.' 
+ name + p.text('' % name) def _exception_pprint(obj, p, cycle): """Base pprint for all exceptions.""" - name = getattr(obj.__class__, "__qualname__", obj.__class__.__name__) - if obj.__class__.__module__ not in ("exceptions", "builtins"): - name = "%s.%s" % (obj.__class__.__module__, name) + name = getattr(obj.__class__, '__qualname__', obj.__class__.__name__) + if obj.__class__.__module__ not in ('exceptions', 'builtins'): + name = '%s.%s' % (obj.__class__.__module__, name) step = len(name) + 1 - p.begin_group(step, name + "(") - for idx, arg in enumerate(getattr(obj, "args", ())): + p.begin_group(step, name + '(') + for idx, arg in enumerate(getattr(obj, 'args', ())): if idx: - p.text(",") + p.text(',') p.breakable() p.pretty(arg) - p.end_group(step, ")") - - -@lazyobject -def _type_pprinters(): - #: printers for builtin types - tp = { - int: _repr_pprint, - float: _repr_pprint, - str: _repr_pprint, - tuple: _seq_pprinter_factory("(", ")", tuple), - list: _seq_pprinter_factory("[", "]", list), - dict: _dict_pprinter_factory("{", "}", dict), - set: _set_pprinter_factory("{", "}", set), - frozenset: _set_pprinter_factory("frozenset({", "})", frozenset), - super: _super_pprint, - type(re.compile("")): _re_pattern_pprint, - type: _type_pprint, - types.FunctionType: _function_pprint, - types.BuiltinFunctionType: _function_pprint, - types.MethodType: _repr_pprint, - datetime.datetime: _repr_pprint, - datetime.timedelta: _repr_pprint, - } - #: the exception base - try: - _exception_base = BaseException - except NameError: - _exception_base = Exception - tp[_exception_base] = _exception_pprint - try: - tp[types.DictProxyType] = _dict_pprinter_factory("") - tp[types.ClassType] = _type_pprint - tp[types.SliceType] = _repr_pprint - except AttributeError: # Python 3 - tp[slice] = _repr_pprint - try: - tp[xrange] = _repr_pprint - tp[long] = _repr_pprint - tp[unicode] = _repr_pprint - except NameError: - tp[range] = _repr_pprint - tp[bytes] = _repr_pprint - return tp - + p.end_group(step, ')') + + +#: the exception base +try: + _exception_base = BaseException +except NameError: + _exception_base = Exception + + +#: printers for builtin types +_type_pprinters = { + int: _repr_pprint, + float: _repr_pprint, + str: _repr_pprint, + tuple: _seq_pprinter_factory('(', ')', tuple), + list: _seq_pprinter_factory('[', ']', list), + dict: _dict_pprinter_factory('{', '}', dict), + + set: _set_pprinter_factory('{', '}', set), + frozenset: _set_pprinter_factory('frozenset({', '})', frozenset), + super: _super_pprint, + _re_pattern_type: _re_pattern_pprint, + type: _type_pprint, + types.FunctionType: _function_pprint, + types.BuiltinFunctionType: _function_pprint, + types.MethodType: _repr_pprint, + + datetime.datetime: _repr_pprint, + datetime.timedelta: _repr_pprint, + _exception_base: _exception_pprint +} + +try: + _type_pprinters[types.DictProxyType] = _dict_pprinter_factory('') + _type_pprinters[types.ClassType] = _type_pprint + _type_pprinters[types.SliceType] = _repr_pprint +except AttributeError: # Python 3 + _type_pprinters[slice] = _repr_pprint + +try: + _type_pprinters[xrange] = _repr_pprint + _type_pprinters[long] = _repr_pprint + _type_pprinters[unicode] = _repr_pprint +except NameError: + _type_pprinters[range] = _repr_pprint + _type_pprinters[bytes] = _repr_pprint #: printers for types specified by name -@lazyobject -def _deferred_type_pprinters(): - dtp = {} - for_type_by_name("collections", "defaultdict", _defaultdict_pprint, dtp=dtp) - for_type_by_name("collections", "OrderedDict", 
_ordereddict_pprint, dtp=dtp) - for_type_by_name("collections", "deque", _deque_pprint, dtp=dtp) - for_type_by_name("collections", "Counter", _counter_pprint, dtp=dtp) - return dtp +_deferred_type_pprinters = { +} def for_type(typ, func): @@ -831,66 +779,79 @@ def for_type(typ, func): _type_pprinters[typ] = func return oldfunc - -def for_type_by_name(type_module, type_name, func, dtp=None): +def for_type_by_name(type_module, type_name, func): """ Add a pretty printer for a type specified by the module and name of a type rather than the type object itself. """ - if dtp is None: - dtp = _deferred_type_pprinters key = (type_module, type_name) - oldfunc = dtp.get(key, None) + oldfunc = _deferred_type_pprinters.get(key, None) if func is not None: # To support easy restoration of old pprinters, we need to ignore Nones. - dtp[key] = func + _deferred_type_pprinters[key] = func return oldfunc #: printers for the default singletons -_singleton_pprinters = LazyObject( - lambda: dict.fromkeys( - map(id, [None, True, False, Ellipsis, NotImplemented]), _repr_pprint - ), - globals(), - "_singleton_pprinters", -) +_singleton_pprinters = dict.fromkeys(map(id, [None, True, False, Ellipsis, + NotImplemented]), _repr_pprint) def _defaultdict_pprint(obj, p, cycle): - name = obj.__class__.__name__ - with p.group(len(name) + 1, name + "(", ")"): + name = 'defaultdict' + with p.group(len(name) + 1, name + '(', ')'): if cycle: - p.text("...") + p.text('...') else: p.pretty(obj.default_factory) - p.text(",") + p.text(',') p.breakable() p.pretty(dict(obj)) def _ordereddict_pprint(obj, p, cycle): - name = obj.__class__.__name__ - with p.group(len(name) + 1, name + "(", ")"): + name = 'OrderedDict' + with p.group(len(name) + 1, name + '(', ')'): if cycle: - p.text("...") + p.text('...') elif len(obj): p.pretty(list(obj.items())) def _deque_pprint(obj, p, cycle): - name = obj.__class__.__name__ - with p.group(len(name) + 1, name + "(", ")"): + name = 'deque' + with p.group(len(name) + 1, name + '(', ')'): if cycle: - p.text("...") + p.text('...') else: p.pretty(list(obj)) def _counter_pprint(obj, p, cycle): - name = obj.__class__.__name__ - with p.group(len(name) + 1, name + "(", ")"): + name = 'Counter' + with p.group(len(name) + 1, name + '(', ')'): if cycle: - p.text("...") + p.text('...') elif len(obj): p.pretty(dict(obj)) + +for_type_by_name('collections', 'defaultdict', _defaultdict_pprint) +for_type_by_name('collections', 'OrderedDict', _ordereddict_pprint) +for_type_by_name('collections', 'deque', _deque_pprint) +for_type_by_name('collections', 'Counter', _counter_pprint) + +if __name__ == '__main__': + from random import randrange + + class Foo(object): + def __init__(self): + self.foo = 1 + self.bar = re.compile(r'\s+') + self.blub = dict.fromkeys(range(30), randrange(1, 40)) + self.hehe = 23424.234234 + self.list = ["blub", "blah", self] + + def get_foo(self): + print("foo") + + pprint(Foo(), verbose=True) diff --git a/xonsh/proc.py b/xonsh/proc.py index d792506..8a79555 100644 --- a/xonsh/proc.py +++ b/xonsh/proc.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """Interface for running Python functions as subprocess-mode commands. 
Code for several helper methods in the `ProcProxy` class have been reproduced @@ -9,1261 +8,55 @@ """ import io import os -import re import sys import time -import queue -import array -import ctypes -import signal -import inspect import builtins -import functools -import threading -import subprocess -import collections.abc as cabc +from threading import Thread +from collections import Sequence +from subprocess import Popen, PIPE, DEVNULL, STDOUT, TimeoutExpired -from xonsh.platform import ( - ON_WINDOWS, - ON_POSIX, - ON_MSYS, - ON_CYGWIN, - CAN_RESIZE_WINDOW, - LFLAG, - CC, -) -from xonsh.tools import ( - redirect_stdout, - redirect_stderr, - print_exception, - XonshCalledProcessError, - findfirst, - on_main_thread, - XonshError, - format_std_prepost, - ALIAS_KWARG_NAMES, -) -from xonsh.lazyasd import lazyobject, LazyObject -from xonsh.jobs import wait_for_active_job, give_terminal_to, _continue -from xonsh.lazyimps import fcntl, termios, _winapi, msvcrt, winutils +from xonsh.tools import redirect_stdout, redirect_stderr, ON_WINDOWS, ON_LINUX, \ + fallback, print_exception -# these decorators are imported for users back-compatible -from xonsh.tools import unthreadable, uncapturable # NOQA +if ON_LINUX: + from xonsh.teepty import TeePTY +else: + TeePTY = None -# foreground has be deprecated -foreground = unthreadable +if ON_WINDOWS: + import _winapi + import msvcrt + class Handle(int): + closed = False -@lazyobject -def STDOUT_CAPTURE_KINDS(): - return frozenset(["stdout", "object"]) + def Close(self, CloseHandle=_winapi.CloseHandle): + if not self.closed: + self.closed = True + CloseHandle(self) + def Detach(self): + if not self.closed: + self.closed = True + return int(self) + raise ValueError("already closed") -# The following escape codes are xterm codes. -# See http://rtfm.etla.org/xterm/ctlseq.html for more. -MODE_NUMS = ("1049", "47", "1047") -START_ALTERNATE_MODE = LazyObject( - lambda: frozenset("\x1b[?{0}h".format(i).encode() for i in MODE_NUMS), - globals(), - "START_ALTERNATE_MODE", -) -END_ALTERNATE_MODE = LazyObject( - lambda: frozenset("\x1b[?{0}l".format(i).encode() for i in MODE_NUMS), - globals(), - "END_ALTERNATE_MODE", -) -ALTERNATE_MODE_FLAGS = LazyObject( - lambda: tuple(START_ALTERNATE_MODE) + tuple(END_ALTERNATE_MODE), - globals(), - "ALTERNATE_MODE_FLAGS", -) -RE_HIDDEN_BYTES = LazyObject( - lambda: re.compile(b"(\001.*?\002)"), globals(), "RE_HIDDEN" -) + def __repr__(self): + return "Handle(%d)" % int(self) + __del__ = Close + __str__ = __repr__ -@lazyobject -def RE_VT100_ESCAPE(): - return re.compile(b"(\x9B|\x1B\\[)[0-?]*[ -\\/]*[@-~]") - -@lazyobject -def RE_HIDE_ESCAPE(): - return re.compile( - b"(" + RE_HIDDEN_BYTES.pattern + b"|" + RE_VT100_ESCAPE.pattern + b")" - ) - - -class QueueReader: - """Provides a file-like interface to reading from a queue.""" - - def __init__(self, fd, timeout=None): - """ - Parameters - ---------- - fd : int - A file descriptor - timeout : float or None, optional - The queue reading timeout. - """ - self.fd = fd - self.timeout = timeout - self.closed = False - self.queue = queue.Queue() - self.thread = None - - def close(self): - """close the reader""" - self.closed = True - - def is_fully_read(self): - """Returns whether or not the queue is fully read and the reader is - closed. - """ - return ( - self.closed - and (self.thread is None or not self.thread.is_alive()) - and self.queue.empty() - ) - - def read_queue(self): - """Reads a single chunk from the queue. 
This is blocking if - the timeout is None and non-blocking otherwise. - """ - try: - return self.queue.get(block=True, timeout=self.timeout) - except queue.Empty: - return b"" - - def read(self, size=-1): - """Reads bytes from the file.""" - i = 0 - buf = b"" - while size < 0 or i != size: - line = self.read_queue() - if line: - buf += line - else: - break - i += len(line) - return buf - - def readline(self, size=-1): - """Reads a line, or a partial line from the file descriptor.""" - i = 0 - nl = b"\n" - buf = b"" - while size < 0 or i != size: - line = self.read_queue() - if line: - buf += line - if line.endswith(nl): - break - else: - break - i += len(line) - return buf - - def _read_all_lines(self): - """This reads all remaining lines in a blocking fashion.""" - lines = [] - while not self.is_fully_read(): - chunk = self.read_queue() - lines.extend(chunk.splitlines(keepends=True)) - return lines - - def readlines(self, hint=-1): - """Reads lines from the file descriptor. This is blocking for negative - hints (i.e. read all the remaining lines) and non-blocking otherwise. - """ - if hint == -1: - return self._read_all_lines() - lines = [] - while len(lines) != hint: - chunk = self.read_queue() - if not chunk: - break - lines.extend(chunk.splitlines(keepends=True)) - return lines - - def fileno(self): - """Returns the file descriptor number.""" - return self.fd - - @staticmethod - def readable(): - """Returns true, because this object is always readable.""" - return True - - def iterqueue(self): - """Iterates through all remaining chunks in a blocking fashion.""" - while not self.is_fully_read(): - chunk = self.read_queue() - if not chunk: - continue - yield chunk - - -def populate_fd_queue(reader, fd, queue): - """Reads 1 kb of data from a file descriptor into a queue. - If this ends or fails, it flags the calling reader object as closed. - """ - while True: - try: - c = os.read(fd, 1024) - except OSError: - reader.closed = True - break - if c: - queue.put(c) - else: - reader.closed = True - break - - -class NonBlockingFDReader(QueueReader): - """A class for reading characters from a file descriptor on a background - thread. This has the advantages that the calling thread can close the - file and that the reading does not block the calling thread. - """ - - def __init__(self, fd, timeout=None): - """ - Parameters - ---------- - fd : int - A file descriptor - timeout : float or None, optional - The queue reading timeout. - """ - super().__init__(fd, timeout=timeout) - # start reading from stream - self.thread = threading.Thread( - target=populate_fd_queue, args=(self, self.fd, self.queue) - ) - self.thread.daemon = True - self.thread.start() - - -def populate_buffer(reader, fd, buffer, chunksize): - """Reads bytes from the file descriptor and copies them into a buffer. - - The reads happen in parallel using the pread() syscall; which is only - available on POSIX systems. If the read fails for any reason, the reader is - flagged as closed. - """ - offset = 0 - while True: - try: - buf = os.pread(fd, chunksize, offset) - except OSError: - reader.closed = True - break - if buf: - buffer.write(buf) - offset += len(buf) - else: - reader.closed = True - break - - -class BufferedFDParallelReader: - """Buffered, parallel background thread reader.""" - - def __init__(self, fd, buffer=None, chunksize=1024): - """ - Parameters - ---------- - fd : int - File descriptor from which to read. - buffer : binary file-like or None, optional - A buffer to write bytes into. 
If None, a new BytesIO object - is created. - chunksize : int, optional - The max size of the parallel reads, default 1 kb. - """ - self.fd = fd - self.buffer = io.BytesIO() if buffer is None else buffer - self.chunksize = chunksize - self.closed = False - # start reading from stream - self.thread = threading.Thread( - target=populate_buffer, args=(self, fd, self.buffer, chunksize) - ) - self.thread.daemon = True - - self.thread.start() - - -def _expand_console_buffer(cols, max_offset, expandsize, orig_posize, fd): - # if we are getting close to the end of the console buffer, - # expand it so that we can read from it successfully. - if cols == 0: - return orig_posize[-1], max_offset, orig_posize - rows = ((max_offset + expandsize) // cols) + 1 - winutils.set_console_screen_buffer_size(cols, rows, fd=fd) - orig_posize = orig_posize[:3] + (rows,) - max_offset = (rows - 1) * cols - return rows, max_offset, orig_posize - - -def populate_console(reader, fd, buffer, chunksize, queue, expandsize=None): - """Reads bytes from the file descriptor and puts lines into the queue. - The reads happened in parallel, - using xonsh.winutils.read_console_output_character(), - and is thus only available on windows. If the read fails for any reason, - the reader is flagged as closed. - """ - # OK, so this function is super annoying because Windows stores its - # buffers as a 2D regular, dense array -- without trailing newlines. - # Meanwhile, we want to add *lines* to the queue. Also, as is typical - # with parallel reads, the entire buffer that you ask for may not be - # filled. Thus we have to deal with the full generality. - # 1. reads may end in the middle of a line - # 2. excess whitespace at the end of a line may not be real, unless - # 3. you haven't read to the end of the line yet! - # So there are alignment issues everywhere. Also, Windows will automatically - # read past the current cursor position, even though there is presumably - # nothing to see there. - # - # These chunked reads basically need to happen like this because, - # a. The default buffer size is HUGE for the console (90k lines x 120 cols) - # as so we can't just read in everything at the end and see what we - # care about without a noticeable performance hit. - # b. Even with this huge size, it is still possible to write more lines than - # this, so we should scroll along with the console. - # Unfortunately, because we do not have control over the terminal emulator, - # It is not possible to compute how far back we should set the beginning - # read position because we don't know how many characters have been popped - # off the top of the buffer. If we did somehow know this number we could do - # something like the following: - # - # new_offset = (y*cols) + x - # if new_offset == max_offset: - # new_offset -= scrolled_offset - # x = new_offset%cols - # y = new_offset//cols - # continue - # - # So this method is imperfect and only works as long as the screen has - # room to expand to. Thus the trick here is to expand the screen size - # when we get close enough to the end of the screen. There remain some - # async issues related to not being able to set the cursor position. - # but they just affect the alignment / capture of the output of the - # first command run after a screen resize. 
- if expandsize is None: - expandsize = 100 * chunksize - x, y, cols, rows = posize = winutils.get_position_size(fd) - pre_x = pre_y = -1 - orig_posize = posize - offset = (cols * y) + x - max_offset = (rows - 1) * cols - # I believe that there is a bug in PTK that if we reset the - # cursor position, the cursor on the next prompt is accidentally on - # the next line. If this is fixed, uncomment the following line. - # if max_offset < offset + expandsize: - # rows, max_offset, orig_posize = _expand_console_buffer( - # cols, max_offset, expandsize, - # orig_posize, fd) - # winutils.set_console_cursor_position(x, y, fd=fd) - while True: - posize = winutils.get_position_size(fd) - offset = (cols * y) + x - if ((posize[1], posize[0]) <= (y, x) and posize[2:] == (cols, rows)) or ( - pre_x == x and pre_y == y - ): - # already at or ahead of the current cursor position. - if reader.closed: - break - else: - time.sleep(reader.timeout) - continue - elif max_offset <= offset + expandsize: - ecb = _expand_console_buffer(cols, max_offset, expandsize, orig_posize, fd) - rows, max_offset, orig_posize = ecb - continue - elif posize[2:] == (cols, rows): - # cursor updated but screen size is the same. - pass - else: - # screen size changed, which is offset preserving - orig_posize = posize - cols, rows = posize[2:] - x = offset % cols - y = offset // cols - pre_x = pre_y = -1 - max_offset = (rows - 1) * cols - continue - try: - buf = winutils.read_console_output_character( - x=x, y=y, fd=fd, buf=buffer, bufsize=chunksize, raw=True - ) - except (OSError, IOError): - reader.closed = True - break - # cursor position and offset - if not reader.closed: - buf = buf.rstrip() - nread = len(buf) - if nread == 0: - time.sleep(reader.timeout) - continue - cur_x, cur_y = posize[0], posize[1] - cur_offset = (cols * cur_y) + cur_x - beg_offset = (cols * y) + x - end_offset = beg_offset + nread - if end_offset > cur_offset and cur_offset != max_offset: - buf = buf[: cur_offset - end_offset] - # convert to lines - xshift = cols - x - yshift = (nread // cols) + (1 if nread % cols > 0 else 0) - lines = [buf[:xshift]] - lines += [ - buf[l * cols + xshift : (l + 1) * cols + xshift] for l in range(yshift) - ] - lines = [line for line in lines if line] - if not lines: - time.sleep(reader.timeout) - continue - # put lines in the queue - nl = b"\n" - for line in lines[:-1]: - queue.put(line.rstrip() + nl) - if len(lines[-1]) == xshift: - queue.put(lines[-1].rstrip() + nl) - else: - queue.put(lines[-1]) - # update x and y locations - if (beg_offset + len(buf)) % cols == 0: - new_offset = beg_offset + len(buf) - else: - new_offset = beg_offset + len(buf.rstrip()) - pre_x = x - pre_y = y - x = new_offset % cols - y = new_offset // cols - time.sleep(reader.timeout) - - -class ConsoleParallelReader(QueueReader): - """Parallel reader for consoles that runs in a background thread. - This is only needed, available, and useful on Windows. - """ - - def __init__(self, fd, buffer=None, chunksize=1024, timeout=None): - """ - Parameters - ---------- - fd : int - Standard buffer file descriptor, 0 for stdin, 1 for stdout (default), - and 2 for stderr. - buffer : ctypes.c_wchar_p, optional - An existing buffer to (re-)use. - chunksize : int, optional - The max size of the parallel reads, default 1 kb. - timeout : float, optional - The queue reading timeout. 
- """ - timeout = timeout or builtins.__xonsh__.env.get("XONSH_PROC_FREQUENCY") - super().__init__(fd, timeout=timeout) - self._buffer = buffer # this cannot be public - if buffer is None: - self._buffer = ctypes.c_char_p(b" " * chunksize) - self.chunksize = chunksize - # start reading from stream - self.thread = threading.Thread( - target=populate_console, - args=(self, fd, self._buffer, chunksize, self.queue), - ) - self.thread.daemon = True - self.thread.start() - - -def safe_fdclose(handle, cache=None): - """Closes a file handle in the safest way possible, and potentially - storing the result. - """ - if cache is not None and cache.get(handle, False): - return - status = True - if handle is None: - pass - elif isinstance(handle, int): - if handle >= 3: - # don't close stdin, stdout, stderr, -1 - try: - os.close(handle) - except OSError: - status = False - elif handle is sys.stdin or handle is sys.stdout or handle is sys.stderr: - # don't close stdin, stdout, or stderr - pass - else: - try: - handle.close() - except OSError: - status = False - if cache is not None: - cache[handle] = status - - -def safe_flush(handle): - """Attempts to safely flush a file handle, returns success bool.""" - status = True - try: - handle.flush() - except OSError: - status = False - return status - - -def still_writable(fd): - """Determines whether a file descriptor is still writable by trying to - write an empty string and seeing if it fails. - """ - try: - os.write(fd, b"") - status = True - except OSError: - status = False - return status - - -class PopenThread(threading.Thread): - """A thread for running and managing subprocess. This allows reading - from the stdin, stdout, and stderr streams in a non-blocking fashion. - - This takes the same arguments and keyword arguments as regular Popen. - This requires that the captured_stdout and captured_stderr attributes - to be set following instantiation. - """ - - def __init__(self, *args, stdin=None, stdout=None, stderr=None, **kwargs): - super().__init__() - self.lock = threading.RLock() - env = builtins.__xonsh__.env - # stdin setup - self.orig_stdin = stdin - if stdin is None: - self.stdin_fd = 0 - elif isinstance(stdin, int): - self.stdin_fd = stdin - else: - self.stdin_fd = stdin.fileno() - self.store_stdin = env.get("XONSH_STORE_STDIN") - self.timeout = env.get("XONSH_PROC_FREQUENCY") - self.in_alt_mode = False - self.stdin_mode = None - # stdout setup - self.orig_stdout = stdout - self.stdout_fd = 1 if stdout is None else stdout.fileno() - self._set_pty_size() - # stderr setup - self.orig_stderr = stderr - # Set some signal handles, if we can. 
Must come before process - # is started to prevent deadlock on windows - self.proc = None # has to be here for closure for handles - self.old_int_handler = self.old_winch_handler = None - self.old_tstp_handler = self.old_quit_handler = None - if on_main_thread(): - self.old_int_handler = signal.signal(signal.SIGINT, self._signal_int) - if ON_POSIX: - self.old_tstp_handler = signal.signal(signal.SIGTSTP, self._signal_tstp) - self.old_quit_handler = signal.signal(signal.SIGQUIT, self._signal_quit) - if CAN_RESIZE_WINDOW: - self.old_winch_handler = signal.signal( - signal.SIGWINCH, self._signal_winch - ) - # start up process - if ON_WINDOWS and stdout is not None: - os.set_inheritable(stdout.fileno(), False) - - try: - self.proc = proc = subprocess.Popen( - *args, stdin=stdin, stdout=stdout, stderr=stderr, **kwargs - ) - except Exception: - self._clean_up() - raise - - self.pid = proc.pid - self.universal_newlines = uninew = proc.universal_newlines - if uninew: - self.encoding = enc = env.get("XONSH_ENCODING") - self.encoding_errors = err = env.get("XONSH_ENCODING_ERRORS") - self.stdin = io.BytesIO() # stdin is always bytes! - self.stdout = io.TextIOWrapper(io.BytesIO(), encoding=enc, errors=err) - self.stderr = io.TextIOWrapper(io.BytesIO(), encoding=enc, errors=err) - else: - self.encoding = self.encoding_errors = None - self.stdin = io.BytesIO() - self.stdout = io.BytesIO() - self.stderr = io.BytesIO() - self.suspended = False - self.prevs_are_closed = False - self.start() - - def run(self): - """Runs the subprocess by performing a parallel read on stdin if allowed, - and copying bytes from captured_stdout to stdout and bytes from - captured_stderr to stderr. - """ - proc = self.proc - spec = self._wait_and_getattr("spec") - # get stdin and apply parallel reader if needed. - stdin = self.stdin - if self.orig_stdin is None: - origin = None - elif ON_POSIX and self.store_stdin: - origin = self.orig_stdin - origfd = origin if isinstance(origin, int) else origin.fileno() - origin = BufferedFDParallelReader(origfd, buffer=stdin) - else: - origin = None - # get non-blocking stdout - stdout = self.stdout.buffer if self.universal_newlines else self.stdout - capout = spec.captured_stdout - if capout is None: - procout = None - else: - procout = NonBlockingFDReader(capout.fileno(), timeout=self.timeout) - # get non-blocking stderr - stderr = self.stderr.buffer if self.universal_newlines else self.stderr - caperr = spec.captured_stderr - if caperr is None: - procerr = None - else: - procerr = NonBlockingFDReader(caperr.fileno(), timeout=self.timeout) - # initial read from buffer - self._read_write(procout, stdout, sys.__stdout__) - self._read_write(procerr, stderr, sys.__stderr__) - # loop over reads while process is running. - i = j = cnt = 1 - while proc.poll() is None: - # this is here for CPU performance reasons. - if i + j == 0: - cnt = min(cnt + 1, 1000) - tout = self.timeout * cnt - if procout is not None: - procout.timeout = tout - if procerr is not None: - procerr.timeout = tout - elif cnt == 1: - pass - else: - cnt = 1 - if procout is not None: - procout.timeout = self.timeout - if procerr is not None: - procerr.timeout = self.timeout - # redirect some output! - i = self._read_write(procout, stdout, sys.__stdout__) - j = self._read_write(procerr, stderr, sys.__stderr__) - if self.suspended: - break - if self.suspended: - return - # close files to send EOF to non-blocking reader. 
- # capout & caperr seem to be needed only by Windows, while - # orig_stdout & orig_stderr are need by posix and Windows. - # Also, order seems to matter here, - # with orig_* needed to be closed before cap* - safe_fdclose(self.orig_stdout) - safe_fdclose(self.orig_stderr) - if ON_WINDOWS: - safe_fdclose(capout) - safe_fdclose(caperr) - # read in the remaining data in a blocking fashion. - while (procout is not None and not procout.is_fully_read()) or ( - procerr is not None and not procerr.is_fully_read() - ): - self._read_write(procout, stdout, sys.__stdout__) - self._read_write(procerr, stderr, sys.__stderr__) - # kill the process if it is still alive. Happens when piping. - if proc.poll() is None: - proc.terminate() - - def _wait_and_getattr(self, name): - """make sure the instance has a certain attr, and return it.""" - while not hasattr(self, name): - time.sleep(1e-7) - return getattr(self, name) - - def _read_write(self, reader, writer, stdbuf): - """Reads a chunk of bytes from a buffer and write into memory or back - down to the standard buffer, as appropriate. Returns the number of - successful reads. - """ - if reader is None: - return 0 - i = -1 - for i, chunk in enumerate(iter(reader.read_queue, b"")): - self._alt_mode_switch(chunk, writer, stdbuf) - if i >= 0: - writer.flush() - stdbuf.flush() - return i + 1 - - def _alt_mode_switch(self, chunk, membuf, stdbuf): - """Enables recursively switching between normal capturing mode - and 'alt' mode, which passes through values to the standard - buffer. Pagers, text editors, curses applications, etc. use - alternate mode. - """ - i, flag = findfirst(chunk, ALTERNATE_MODE_FLAGS) - if flag is None: - self._alt_mode_writer(chunk, membuf, stdbuf) - else: - # This code is executed when the child process switches the - # terminal into or out of alternate mode. The line below assumes - # that the user has opened vim, less, or similar, and writes writes - # to stdin. - j = i + len(flag) - # write the first part of the chunk in the current mode. - self._alt_mode_writer(chunk[:i], membuf, stdbuf) - # switch modes - # write the flag itself the current mode where alt mode is on - # so that it is streamed to the terminal ASAP. - # this is needed for terminal emulators to find the correct - # positions before and after alt mode. - alt_mode = flag in START_ALTERNATE_MODE - if alt_mode: - self.in_alt_mode = alt_mode - self._alt_mode_writer(flag, membuf, stdbuf) - self._enable_cbreak_stdin() - else: - self._alt_mode_writer(flag, membuf, stdbuf) - self.in_alt_mode = alt_mode - self._disable_cbreak_stdin() - # recurse this function, but without the current flag. - self._alt_mode_switch(chunk[j:], membuf, stdbuf) - - def _alt_mode_writer(self, chunk, membuf, stdbuf): - """Write bytes to the standard buffer if in alt mode or otherwise - to the in-memory buffer. - """ - if not chunk: - pass # don't write empty values - elif self.in_alt_mode: - stdbuf.buffer.write(chunk) - else: - with self.lock: - p = membuf.tell() - membuf.seek(0, io.SEEK_END) - membuf.write(chunk) - membuf.seek(p) - - # - # Window resize handlers - # - - def _signal_winch(self, signum, frame): - """Signal handler for SIGWINCH - window size has changed.""" - self.send_signal(signal.SIGWINCH) - self._set_pty_size() - - def _set_pty_size(self): - """Sets the window size of the child pty based on the window size of - our own controlling terminal. - """ - if ON_WINDOWS or not os.isatty(self.stdout_fd): - return - # Get the terminal size of the real terminal, set it on the - # pseudoterminal. 
- buf = array.array("h", [0, 0, 0, 0]) - # 1 = stdout here - try: - fcntl.ioctl(1, termios.TIOCGWINSZ, buf, True) - fcntl.ioctl(self.stdout_fd, termios.TIOCSWINSZ, buf) - except OSError: - pass - - # - # SIGINT handler - # - - def _signal_int(self, signum, frame): - """Signal handler for SIGINT - Ctrl+C may have been pressed.""" - self.send_signal(signum) - if self.proc is not None and self.proc.poll() is not None: - self._restore_sigint(frame=frame) - if on_main_thread(): - signal.pthread_kill(threading.get_ident(), signal.SIGINT) - - def _restore_sigint(self, frame=None): - old = self.old_int_handler - if old is not None: - if on_main_thread(): - signal.signal(signal.SIGINT, old) - self.old_int_handler = None - if frame is not None: - self._disable_cbreak_stdin() - if old is not None and old is not self._signal_int: - old(signal.SIGINT, frame) - - # - # SIGTSTP handler - # - - def _signal_tstp(self, signum, frame): - """Signal handler for suspending SIGTSTP - Ctrl+Z may have been pressed. - """ - self.suspended = True - self.send_signal(signum) - self._restore_sigtstp(frame=frame) - - def _restore_sigtstp(self, frame=None): - old = self.old_tstp_handler - if old is not None: - if on_main_thread(): - signal.signal(signal.SIGTSTP, old) - self.old_tstp_handler = None - if frame is not None: - self._disable_cbreak_stdin() - - # - # SIGQUIT handler - # - - def _signal_quit(self, signum, frame): - r"""Signal handler for quiting SIGQUIT - Ctrl+\ may have been pressed. - """ - self.send_signal(signum) - self._restore_sigquit(frame=frame) - - def _restore_sigquit(self, frame=None): - old = self.old_quit_handler - if old is not None: - if on_main_thread(): - signal.signal(signal.SIGQUIT, old) - self.old_quit_handler = None - if frame is not None: - self._disable_cbreak_stdin() - - # - # cbreak mode handlers - # - - def _enable_cbreak_stdin(self): - if not ON_POSIX: - return - try: - self.stdin_mode = termios.tcgetattr(self.stdin_fd)[:] - except termios.error: - # this can happen for cases where another process is controlling - # xonsh's tty device, such as in testing. - self.stdin_mode = None - return - new = self.stdin_mode[:] - new[LFLAG] &= ~(termios.ECHO | termios.ICANON) - new[CC][termios.VMIN] = 1 - new[CC][termios.VTIME] = 0 - try: - # termios.TCSAFLUSH may be less reliable than termios.TCSANOW - termios.tcsetattr(self.stdin_fd, termios.TCSANOW, new) - except termios.error: - self._disable_cbreak_stdin() - - def _disable_cbreak_stdin(self): - if not ON_POSIX or self.stdin_mode is None: - return - new = self.stdin_mode[:] - new[LFLAG] |= termios.ECHO | termios.ICANON - new[CC][termios.VMIN] = 1 - new[CC][termios.VTIME] = 0 - try: - termios.tcsetattr(self.stdin_fd, termios.TCSANOW, new) - except termios.error: - pass - - # - # Dispatch methods - # - - def poll(self): - """Dispatches to Popen.returncode.""" - return self.proc.returncode - - def wait(self, timeout=None): - """Dispatches to Popen.wait(), but also does process cleanup such as - joining this thread and replacing the original window size signal - handler. - """ - self._disable_cbreak_stdin() - rtn = self.proc.wait(timeout=timeout) - self.join() - # need to replace the old signal handlers somewhere... 
- if self.old_winch_handler is not None and on_main_thread(): - signal.signal(signal.SIGWINCH, self.old_winch_handler) - self.old_winch_handler = None - self._clean_up() - return rtn - - def _clean_up(self): - self._restore_sigint() - self._restore_sigtstp() - self._restore_sigquit() - - @property - def returncode(self): - """Process return code.""" - return self.proc.returncode - - @returncode.setter - def returncode(self, value): - """Process return code.""" - self.proc.returncode = value - - @property - def signal(self): - """Process signal, or None.""" - s = getattr(self.proc, "signal", None) - if s is None: - rtn = self.returncode - if rtn is not None and rtn != 0: - s = (-1 * rtn, rtn < 0 if ON_WINDOWS else os.WCOREDUMP(rtn)) - return s - - @signal.setter - def signal(self, value): - """Process signal, or None.""" - self.proc.signal = value - - def send_signal(self, signal): - """Dispatches to Popen.send_signal().""" - dt = 0.0 - while self.proc is None and dt < self.timeout: - time.sleep(1e-7) - dt += 1e-7 - if self.proc is None: - return - try: - rtn = self.proc.send_signal(signal) - except ProcessLookupError: - # This can happen in the case of !(cmd) when the command has ended - rtn = None - return rtn - - def terminate(self): - """Dispatches to Popen.terminate().""" - return self.proc.terminate() - - def kill(self): - """Dispatches to Popen.kill().""" - return self.proc.kill() - - -class Handle(int): - closed = False - - def Close(self, CloseHandle=None): - CloseHandle = CloseHandle or _winapi.CloseHandle - if not self.closed: - self.closed = True - CloseHandle(self) - - def Detach(self): - if not self.closed: - self.closed = True - return int(self) - raise ValueError("already closed") - - def __repr__(self): - return "Handle(%d)" % int(self) - - __del__ = Close - __str__ = __repr__ - - -class FileThreadDispatcher: - """Dispatches to different file handles depending on the - current thread. Useful if you want file operations to go to different - places for different threads. - """ - - def __init__(self, default=None): - """ - Parameters - ---------- - default : file-like or None, optional - The file handle to write to if a thread cannot be found in - the registry. If None, a new in-memory instance is used. - - Attributes - ---------- - registry : dict - Maps thread idents to file handles. - """ - if default is None: - default = io.TextIOWrapper(io.BytesIO()) - self.default = default - self.registry = {} - - def register(self, handle): - """Registers a file handle for the current thread. Returns self so - that this method can be used in a with-statement.
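Note on the signal property above: on POSIX, Popen reports death-by-signal as a negative returncode, which is exactly what the (-1 * rtn, ...) decoding relies on. A quick standalone check (POSIX-only sketch):

import signal
import subprocess

p = subprocess.Popen(["sleep", "100"])
p.send_signal(signal.SIGTERM)
p.wait()
# Popen encodes "killed by signal N" as returncode == -N on POSIX
assert p.returncode == -signal.SIGTERM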
- """ - if handle is self: - # prevent weird recurssion errors - return self - self.registry[threading.get_ident()] = handle - return self - - def deregister(self): - """Removes the current thread from the registry.""" - ident = threading.get_ident() - if ident in self.registry: - # don't remove if we have already been deregistered - del self.registry[threading.get_ident()] - - @property - def available(self): - """True if the thread is available in the registry.""" - return threading.get_ident() in self.registry - - @property - def handle(self): - """Gets the current handle for the thread.""" - return self.registry.get(threading.get_ident(), self.default) - - def __enter__(self): - pass - - def __exit__(self, ex_type, ex_value, ex_traceback): - self.deregister() - - # - # io.TextIOBase interface - # - - @property - def encoding(self): - """Gets the encoding for this thread's handle.""" - return self.handle.encoding - - @property - def errors(self): - """Gets the errors for this thread's handle.""" - return self.handle.errors - - @property - def newlines(self): - """Gets the newlines for this thread's handle.""" - return self.handle.newlines - - @property - def buffer(self): - """Gets the buffer for this thread's handle.""" - return self.handle.buffer - - def detach(self): - """Detaches the buffer for the current thread.""" - return self.handle.detach() - - def read(self, size=None): - """Reads from the handle for the current thread.""" - return self.handle.read(size) - - def readline(self, size=-1): - """Reads a line from the handle for the current thread.""" - return self.handle.readline(size) - - def readlines(self, hint=-1): - """Reads lines from the handle for the current thread.""" - return self.handle.readlines(hint) - - def seek(self, offset, whence=io.SEEK_SET): - """Seeks the current file.""" - return self.handle.seek(offset, whence) - - def tell(self): - """Reports the current position in the handle for the current thread.""" - return self.handle.tell() - - def write(self, s): - """Writes to this thread's handle. This also flushes, just to be - extra sure the string was written. 
- """ - h = self.handle - try: - r = h.write(s) - h.flush() - except OSError: - r = None - return r - - @property - def line_buffering(self): - """Gets if line buffering for this thread's handle enabled.""" - return self.handle.line_buffering - - # - # io.IOBase interface - # - - def close(self): - """Closes the current thread's handle.""" - return self.handle.close() - - @property - def closed(self): - """Is the thread's handle closed.""" - return self.handle.closed - - def fileno(self): - """Returns the file descriptor for the current thread.""" - return self.handle.fileno() - - def flush(self): - """Flushes the file descriptor for the current thread.""" - return safe_flush(self.handle) - - def isatty(self): - """Returns if the file descriptor for the current thread is a tty.""" - return self.handle.isatty() - - def readable(self): - """Returns if file descriptor for the current thread is readable.""" - return self.handle.readable() - - def seekable(self): - """Returns if file descriptor for the current thread is seekable.""" - return self.handle.seekable() - - def truncate(self, size=None): - """Truncates the file for for the current thread.""" - return self.handle.truncate() - - def writable(self, size=None): - """Returns if file descriptor for the current thread is writable.""" - return self.handle.writable(size) - - def writelines(self): - """Writes lines for the file descriptor for the current thread.""" - return self.handle.writelines() - - -# These should NOT be lazy since they *need* to get the true stdout from the -# main thread. Also their creation time should be negligible. -STDOUT_DISPATCHER = FileThreadDispatcher(default=sys.stdout) -STDERR_DISPATCHER = FileThreadDispatcher(default=sys.stderr) - - -def parse_proxy_return(r, stdout, stderr): - """Proxies may return a variety of outputs. This handles them generally. - - Parameters - ---------- - r : tuple, str, int, or None - Return from proxy function - stdout : file-like - Current stdout stream - stdout : file-like - Current stderr stream - - Returns - ------- - cmd_result : int - The return code of the proxy - """ - cmd_result = 0 - if isinstance(r, str): - stdout.write(r) - stdout.flush() - elif isinstance(r, int): - cmd_result = r - elif isinstance(r, cabc.Sequence): - rlen = len(r) - if rlen > 0 and r[0] is not None: - stdout.write(r[0]) - stdout.flush() - if rlen > 1 and r[1] is not None: - stderr.write(r[1]) - stderr.flush() - if rlen > 2 and r[2] is not None: - cmd_result = r[2] - elif r is not None: - # for the random object... - stdout.write(str(r)) - stdout.flush() - return cmd_result - - -def proxy_zero(f, args, stdin, stdout, stderr, spec, stack): - """Calls a proxy function which takes no parameters.""" - return f() - - -def proxy_one(f, args, stdin, stdout, stderr, spec, stack): - """Calls a proxy function which takes one parameter: args""" - return f(args) - - -def proxy_two(f, args, stdin, stdout, stderr, spec, stack): - """Calls a proxy function which takes two parameter: args and stdin.""" - return f(args, stdin) - - -def proxy_three(f, args, stdin, stdout, stderr, spec, stack): - """Calls a proxy function which takes three parameter: args, stdin, stdout. - """ - return f(args, stdin, stdout) - - -def proxy_four(f, args, stdin, stdout, stderr, spec, stack): - """Calls a proxy function which takes four parameter: args, stdin, stdout, - and stderr. 
- """ - return f(args, stdin, stdout, stderr) - - -def proxy_five(f, args, stdin, stdout, stderr, spec, stack): - """Calls a proxy function which takes four parameter: args, stdin, stdout, - stderr, and spec. - """ - return f(args, stdin, stdout, stderr, spec) - - -PROXIES = (proxy_zero, proxy_one, proxy_two, proxy_three, proxy_four, proxy_five) - - -def partial_proxy(f): - """Dispatches the appropriate proxy function based on the number of args.""" - numargs = 0 - for name, param in inspect.signature(f).parameters.items(): - if ( - param.kind == param.POSITIONAL_ONLY - or param.kind == param.POSITIONAL_OR_KEYWORD - ): - numargs += 1 - elif name in ALIAS_KWARG_NAMES and param.kind == param.KEYWORD_ONLY: - numargs += 1 - if numargs < 6: - return functools.partial(PROXIES[numargs], f) - elif numargs == 6: - # don't need to partial. - return f - else: - e = "Expected proxy with 6 or fewer arguments for {}, not {}" - raise XonshError(e.format(", ".join(ALIAS_KWARG_NAMES), numargs)) - - -class ProcProxyThread(threading.Thread): +class ProcProxy(Thread): """ Class representing a function to be run as a subprocess-mode command. """ - - def __init__( - self, - f, - args, - stdin=None, - stdout=None, - stderr=None, - universal_newlines=False, - close_fds=False, - env=None, - ): + def __init__(self, f, args, + stdin=None, + stdout=None, + stderr=None, + universal_newlines=False): """Parameters ---------- f : function @@ -1284,38 +77,41 @@ def __init__( A file-like object representing stderr (error output can be written here). If `stderr` is not provided or if it is explicitly set to `None`, then `sys.stderr` is used. - universal_newlines : bool, optional - Whether or not to use universal newlines. - close_fds : bool, optional - Whether or not to close file descriptors. This is here for Popen - compatability and currently does nothing. - env : Mapping, optional - Environment mapping. """ - self.orig_f = f - self.f = partial_proxy(f) + self.f = f + """ + The function to be executed. It should be a function of four + arguments, described below. + + Parameters + ---------- + args : list + A (possibly empty) list containing the arguments that were given on + the command line + stdin : file-like + A file-like object representing stdin (input can be read from + here). + stdout : file-like + A file-like object representing stdout (normal output can be + written here). + stderr : file-like + A file-like object representing stderr (error output can be + written here). 
+ """ self.args = args self.pid = None self.returncode = None - self._closed_handle_cache = {} + self.wait = self.join handles = self._get_handles(stdin, stdout, stderr) - ( - self.p2cread, - self.p2cwrite, - self.c2pread, - self.c2pwrite, - self.errread, - self.errwrite, - ) = handles + (self.p2cread, self.p2cwrite, + self.c2pread, self.c2pwrite, + self.errread, self.errwrite) = handles # default values self.stdin = stdin - self.stdout = stdout - self.stderr = stderr - self.close_fds = close_fds - self.env = env or builtins.__xonsh__.env - self._interrupted = False + self.stdout = None + self.stderr = None if ON_WINDOWS: if self.p2cwrite != -1: @@ -1326,36 +122,23 @@ def __init__( self.errread = msvcrt.open_osfhandle(self.errread.Detach(), 0) if self.p2cwrite != -1: - self.stdin = io.open(self.p2cwrite, "wb", -1) + self.stdin = io.open(self.p2cwrite, 'wb', -1) if universal_newlines: - self.stdin = io.TextIOWrapper( - self.stdin, write_through=True, line_buffering=False - ) - elif isinstance(stdin, int) and stdin != 0: - self.stdin = io.open(stdin, "wb", -1) - + self.stdin = io.TextIOWrapper(self.stdin, write_through=True, + line_buffering=False) if self.c2pread != -1: - self.stdout = io.open(self.c2pread, "rb", -1) + self.stdout = io.open(self.c2pread, 'rb', -1) if universal_newlines: self.stdout = io.TextIOWrapper(self.stdout) if self.errread != -1: - self.stderr = io.open(self.errread, "rb", -1) + self.stderr = io.open(self.errread, 'rb', -1) if universal_newlines: self.stderr = io.TextIOWrapper(self.stderr) - # Set some signal handles, if we can. Must come before process - # is started to prevent deadlock on windows - self.old_int_handler = None - if on_main_thread(): - self.old_int_handler = signal.signal(signal.SIGINT, self._signal_int) - # start up the proc - super().__init__() + Thread.__init__(self) self.start() - def __del__(self): - self._restore_sigint() - def run(self): """Set up input/output streams and execute the child function in a new thread. 
This is part of the `threading.Thread` interface and should @@ -1363,164 +146,54 @@ def run(self): """ if self.f is None: return - spec = self._wait_and_getattr("spec") - last_in_pipeline = spec.last_in_pipeline - if last_in_pipeline: - capout = spec.captured_stdout # NOQA - caperr = spec.captured_stderr # NOQA - env = builtins.__xonsh__.env - enc = env.get("XONSH_ENCODING") - err = env.get("XONSH_ENCODING_ERRORS") + if self.stdin is not None: + sp_stdin = io.TextIOWrapper(self.stdin) + else: + sp_stdin = io.StringIO("") + if ON_WINDOWS: - if self.p2cread != -1: - self.p2cread = msvcrt.open_osfhandle(self.p2cread.Detach(), 0) if self.c2pwrite != -1: self.c2pwrite = msvcrt.open_osfhandle(self.c2pwrite.Detach(), 0) if self.errwrite != -1: self.errwrite = msvcrt.open_osfhandle(self.errwrite.Detach(), 0) - # get stdin - if self.stdin is None: - sp_stdin = None - elif self.p2cread != -1: - sp_stdin = io.TextIOWrapper( - io.open(self.p2cread, "rb", -1), encoding=enc, errors=err - ) - else: - sp_stdin = sys.stdin - # stdout + if self.c2pwrite != -1: - sp_stdout = io.TextIOWrapper( - io.open(self.c2pwrite, "wb", -1), encoding=enc, errors=err - ) + sp_stdout = io.TextIOWrapper(io.open(self.c2pwrite, 'wb', -1)) else: sp_stdout = sys.stdout - # stderr if self.errwrite == self.c2pwrite: sp_stderr = sp_stdout elif self.errwrite != -1: - sp_stderr = io.TextIOWrapper( - io.open(self.errwrite, "wb", -1), encoding=enc, errors=err - ) + sp_stderr = io.TextIOWrapper(io.open(self.errwrite, 'wb', -1)) else: sp_stderr = sys.stderr - # run the function itself - try: - with STDOUT_DISPATCHER.register(sp_stdout), STDERR_DISPATCHER.register( - sp_stderr - ), redirect_stdout(STDOUT_DISPATCHER), redirect_stderr(STDERR_DISPATCHER): - r = self.f(self.args, sp_stdin, sp_stdout, sp_stderr, spec, spec.stack) - except SystemExit as e: - r = e.code if isinstance(e.code, int) else int(bool(e.code)) - except OSError: - status = still_writable(self.c2pwrite) and still_writable(self.errwrite) - if status: - # stdout and stderr are still writable, so error must - # come from function itself. - print_exception() - r = 1 - else: - # stdout and stderr are no longer writable, so error must - # come from the fact that the next process in the pipeline - # has closed the other side of the pipe. The function then - # attempted to write to this side of the pipe anyway. This - # is not truly an error and we should exit gracefully. - r = 0 - except Exception: - print_exception() - r = 1 - safe_flush(sp_stdout) - safe_flush(sp_stderr) - self.returncode = parse_proxy_return(r, sp_stdout, sp_stderr) - if not last_in_pipeline and not ON_WINDOWS: - # mac requires us *not to* close the handles here while - # windows requires us *to* close the handles here - return - # clean up - # scopz: not sure why this is needed, but stdin cannot go here - # and stdout & stderr must. - handles = [self.stdout, self.stderr] - for handle in handles: - safe_fdclose(handle, cache=self._closed_handle_cache) - def _wait_and_getattr(self, name): - """make sure the instance has a certain attr, and return it.""" - while not hasattr(self, name): - time.sleep(1e-7) - return getattr(self, name) + r = self.f(self.args, sp_stdin, sp_stdout, sp_stderr) + self.returncode = 0 if r is None else r def poll(self): """Check if the function has completed. 
- Returns - ------- - None if the function is still executing, and the returncode otherwise + :return: `None` if the function is still executing, `True` if the + function finished successfully, and `False` if there was an + error """ return self.returncode - def wait(self, timeout=None): - """Waits for the process to finish and returns the return code.""" - self.join() - self._restore_sigint() - return self.returncode - - # - # SIGINT handler - # - - def _signal_int(self, signum, frame): - """Signal handler for SIGINT - Ctrl+C may have been pressed.""" - # Check if we have already been interrupted. This should prevent - # the possibility of infinite recursion. - if self._interrupted: - return - self._interrupted = True - # close file handles here to stop any processes piped to us. - handles = ( - self.p2cread, - self.p2cwrite, - self.c2pread, - self.c2pwrite, - self.errread, - self.errwrite, - ) - for handle in handles: - safe_fdclose(handle) - if self.poll() is not None: - self._restore_sigint(frame=frame) - if on_main_thread(): - signal.pthread_kill(threading.get_ident(), signal.SIGINT) - - def _restore_sigint(self, frame=None): - old = self.old_int_handler - if old is not None: - if on_main_thread(): - signal.signal(signal.SIGINT, old) - self.old_int_handler = None - if frame is not None: - if old is not None and old is not self._signal_int: - old(signal.SIGINT, frame) - if self._interrupted: - self.returncode = 1 - # The code below (_get_devnull, _get_handles, and _make_inheritable) comes # from subprocess.py in the Python 3.4.2 Standard Library def _get_devnull(self): - if not hasattr(self, "_devnull"): + if not hasattr(self, '_devnull'): self._devnull = os.open(os.devnull, os.O_RDWR) return self._devnull if ON_WINDOWS: - def _make_inheritable(self, handle): """Return a duplicate of handle, which is inheritable""" h = _winapi.DuplicateHandle( - _winapi.GetCurrentProcess(), - handle, - _winapi.GetCurrentProcess(), - 0, - 1, - _winapi.DUPLICATE_SAME_ACCESS, - ) + _winapi.GetCurrentProcess(), handle, + _winapi.GetCurrentProcess(), 0, 1, + _winapi.DUPLICATE_SAME_ACCESS) return Handle(h) def _get_handles(self, stdin, stdout, stderr): @@ -1540,9 +213,10 @@ def _get_handles(self, stdin, stdout, stderr): p2cread, _ = _winapi.CreatePipe(None, 0) p2cread = Handle(p2cread) _winapi.CloseHandle(_) - elif stdin == subprocess.PIPE: + elif stdin == PIPE: + p2cread, p2cwrite = _winapi.CreatePipe(None, 0) p2cread, p2cwrite = Handle(p2cread), Handle(p2cwrite) - elif stdin == subprocess.DEVNULL: + elif stdin == DEVNULL: p2cread = msvcrt.get_osfhandle(self._get_devnull()) elif isinstance(stdin, int): p2cread = msvcrt.get_osfhandle(stdin) @@ -1557,10 +231,10 @@ def _get_handles(self, stdin, stdout, stderr): _, c2pwrite = _winapi.CreatePipe(None, 0) c2pwrite = Handle(c2pwrite) _winapi.CloseHandle(_) - elif stdout == subprocess.PIPE: + elif stdout == PIPE: c2pread, c2pwrite = _winapi.CreatePipe(None, 0) c2pread, c2pwrite = Handle(c2pread), Handle(c2pwrite) - elif stdout == subprocess.DEVNULL: + elif stdout == DEVNULL: c2pwrite = msvcrt.get_osfhandle(self._get_devnull()) elif isinstance(stdout, int): c2pwrite = msvcrt.get_osfhandle(stdout) @@ -1575,12 +249,12 @@ def _get_handles(self, stdin, stdout, stderr): _, errwrite = _winapi.CreatePipe(None, 0) errwrite = Handle(errwrite) _winapi.CloseHandle(_) - elif stderr == subprocess.PIPE: + elif stderr == PIPE: errread, errwrite = _winapi.CreatePipe(None, 0) errread, errwrite = Handle(errread), Handle(errwrite) - elif stderr == subprocess.STDOUT: + elif stderr ==
STDOUT: errwrite = c2pwrite - elif stderr == subprocess.DEVNULL: + elif stderr == DEVNULL: errwrite = msvcrt.get_osfhandle(self._get_devnull()) elif isinstance(stderr, int): errwrite = msvcrt.get_osfhandle(stderr) @@ -1589,7 +263,9 @@ def _get_handles(self, stdin, stdout, stderr): errwrite = msvcrt.get_osfhandle(stderr.fileno()) errwrite = self._make_inheritable(errwrite) - return (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) + return (p2cread, p2cwrite, + c2pread, c2pwrite, + errread, errwrite) else: # POSIX versions @@ -1603,9 +279,9 @@ def _get_handles(self, stdin, stdout, stderr): if stdin is None: pass - elif stdin == subprocess.PIPE: + elif stdin == PIPE: p2cread, p2cwrite = os.pipe() - elif stdin == subprocess.DEVNULL: + elif stdin == DEVNULL: p2cread = self._get_devnull() elif isinstance(stdin, int): p2cread = stdin @@ -1615,9 +291,9 @@ def _get_handles(self, stdin, stdout, stderr): if stdout is None: pass - elif stdout == subprocess.PIPE: + elif stdout == PIPE: c2pread, c2pwrite = os.pipe() - elif stdout == subprocess.DEVNULL: + elif stdout == DEVNULL: c2pwrite = self._get_devnull() elif isinstance(stdout, int): c2pwrite = stdout @@ -1627,11 +303,11 @@ def _get_handles(self, stdin, stdout, stderr): if stderr is None: pass - elif stderr == subprocess.PIPE: + elif stderr == PIPE: errread, errwrite = os.pipe() - elif stderr == subprocess.STDOUT: + elif stderr == STDOUT: errwrite = c2pwrite - elif stderr == subprocess.DEVNULL: + elif stderr == DEVNULL: errwrite = self._get_devnull() elif isinstance(stderr, int): errwrite = stderr @@ -1639,855 +315,105 @@ def _get_handles(self, stdin, stdout, stderr): # Assuming file-like object errwrite = stderr.fileno() - return (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite) - + return (p2cread, p2cwrite, + c2pread, c2pwrite, + errread, errwrite) -# -# Foreground Thread Process Proxies -# +class SimpleProcProxy(ProcProxy): + """Variant of `ProcProxy` for simpler functions. -class ProcProxy(object): - """This is a process proxy class that runs its alias functions on the - same thread that it was called from, which is typically the main thread. - This prevents the process from running on a background thread, but enables - debugger and profiler tools (functions) to be run on the same thread that they - are attempting to debug. + The function passed into the initializer for `SimpleProcProxy` should have + the form described in the xonsh tutorial. This function is then wrapped to + make a new function of the form expected by `ProcProxy`. """ - def __init__( - self, - f, - args, - stdin=None, - stdout=None, - stderr=None, - universal_newlines=False, - close_fds=False, - env=None, - ): - self.orig_f = f - self.f = partial_proxy(f) - self.args = args - self.pid = os.getpid() - self.returncode = None - self.stdin = stdin - self.stdout = stdout - self.stderr = stderr - self.universal_newlines = universal_newlines - self.close_fds = close_fds - self.env = env - - def poll(self): - """Check if the function has completed via the returncode or None. - """ - return self.returncode - - def wait(self, timeout=None): - """Runs the function and returns the result. The timeout argument is only - present for API compatibility.
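The unthreaded ProcProxy above ultimately just calls the alias function directly with explicit streams. A toy illustration of that calling convention (the alias name and arguments are made up):

import io

def alias(args, stdin, stdout, stderr):
    print("args were:", args, file=stdout)
    return 0  # returncode

out, err = io.StringIO(), io.StringIO()
rc = alias(["-l"], None, out, err)
print(rc, out.getvalue())  # -> 0 args were: ['-l']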
- """ - if self.f is None: - return 0 - env = builtins.__xonsh__.env - enc = env.get("XONSH_ENCODING") - err = env.get("XONSH_ENCODING_ERRORS") - spec = self._wait_and_getattr("spec") - # set file handles - if self.stdin is None: - stdin = None - else: - if isinstance(self.stdin, int): - inbuf = io.open(self.stdin, "rb", -1) - else: - inbuf = self.stdin - stdin = io.TextIOWrapper(inbuf, encoding=enc, errors=err) - stdout = self._pick_buf(self.stdout, sys.stdout, enc, err) - stderr = self._pick_buf(self.stderr, sys.stderr, enc, err) - # run the actual function - try: - r = self.f(self.args, stdin, stdout, stderr, spec, spec.stack) - except Exception: - print_exception() - r = 1 - self.returncode = parse_proxy_return(r, stdout, stderr) - safe_flush(stdout) - safe_flush(stderr) - return self.returncode - - @staticmethod - def _pick_buf(handle, sysbuf, enc, err): - if handle is None or handle is sysbuf: - buf = sysbuf - elif isinstance(handle, int): - if handle < 3: - buf = sysbuf - else: - buf = io.TextIOWrapper( - io.open(handle, "wb", -1), encoding=enc, errors=err - ) - elif hasattr(handle, "encoding"): - # must be a text stream, no need to wrap. - buf = handle - else: - # must be a binary stream, should wrap it. - buf = io.TextIOWrapper(handle, encoding=enc, errors=err) - return buf - - def _wait_and_getattr(self, name): - """make sure the instance has a certain attr, and return it.""" - while not hasattr(self, name): - time.sleep(1e-7) - return getattr(self, name) - - -@lazyobject -def SIGNAL_MESSAGES(): - sm = { - signal.SIGABRT: "Aborted", - signal.SIGFPE: "Floating point exception", - signal.SIGILL: "Illegal instructions", - signal.SIGTERM: "Terminated", - signal.SIGSEGV: "Segmentation fault", - } - if ON_POSIX: - sm.update( - {signal.SIGQUIT: "Quit", signal.SIGHUP: "Hangup", signal.SIGKILL: "Killed"} - ) - return sm - - -def safe_readlines(handle, hint=-1): - """Attempts to read lines without throwing an error.""" - try: - lines = handle.readlines(hint) - except OSError: - lines = [] - return lines - - -def safe_readable(handle): - """Attempts to find if the handle is readable without throwing an error.""" - try: - status = handle.readable() - except (OSError, ValueError): - status = False - return status - - -def update_fg_process_group(pipeline_group, background): - if background: - return False - if not ON_POSIX: - return False - env = builtins.__xonsh__.env - if not env.get("XONSH_INTERACTIVE"): - return False - return give_terminal_to(pipeline_group) - - -class CommandPipeline: - """Represents a subprocess-mode command pipeline.""" - - attrnames = ( - "stdin", - "stdout", - "stderr", - "pid", - "returncode", - "args", - "alias", - "stdin_redirect", - "stdout_redirect", - "stderr_redirect", - "timestamps", - "executed_cmd", - "input", - "output", - "errors", - ) - - nonblocking = (io.BytesIO, NonBlockingFDReader, ConsoleParallelReader) - - def __init__(self, specs): - """ - Parameters - ---------- - specs : list of SubprocSpec - Process specifications - - Attributes - ---------- - spec : SubprocSpec - The last specification in specs - proc : Popen-like - The process in procs - ended : bool - Boolean for if the command has stopped executing. - input : str - A string of the standard input. - output : str - A string of the standard output. - errors : str - A string of the standard error. - lines : list of str - The output lines - starttime : floats or None - Pipeline start timestamp. 
- """ - self.starttime = None - self.ended = False - self.procs = [] - self.specs = specs - self.spec = specs[-1] - self.captured = specs[-1].captured - self.input = self._output = self.errors = self.endtime = None - self._closed_handle_cache = {} - self.lines = [] - self._stderr_prefix = self._stderr_postfix = None - self.term_pgid = None - - background = self.spec.background - pipeline_group = None - for spec in specs: - if self.starttime is None: - self.starttime = time.time() + def __init__(self, f, args, stdin=None, stdout=None, stderr=None, + universal_newlines=False): + def wrapped_simple_command(args, stdin, stdout, stderr): try: - proc = spec.run(pipeline_group=pipeline_group) + i = stdin.read() + with redirect_stdout(stdout), redirect_stderr(stderr): + r = f(args, i) + if isinstance(r, str): + stdout.write(r) + elif isinstance(r, Sequence): + if r[0] is not None: + stdout.write(r[0]) + if r[1] is not None: + stderr.write(r[1]) + elif r is not None: + stdout.write(str(r)) + return 0 # returncode for succees except Exception: print_exception() - self._return_terminal() - self.proc = None - return - if ( - proc.pid - and pipeline_group is None - and not spec.is_proxy - and self.captured != "object" - ): - pipeline_group = proc.pid - if update_fg_process_group(pipeline_group, background): - self.term_pgid = pipeline_group - self.procs.append(proc) - self.proc = self.procs[-1] + return 1 # returncode for failure + super().__init__(wrapped_simple_command, + args, stdin, stdout, stderr, + universal_newlines) - def __repr__(self): - s = self.__class__.__name__ + "(" - s += ", ".join(a + "=" + str(getattr(self, a)) for a in self.attrnames) - s += ")" - return s - def __bool__(self): - return self.returncode == 0 +@fallback(ON_LINUX, Popen) +class TeePTYProc(object): - def __len__(self): - return len(self.procs) - - def __iter__(self): - """Iterates through stdout and returns the lines, converting to - strings and universal newlines if needed. - """ - if self.ended: - yield from iter(self.lines) - else: - yield from self.tee_stdout() - - def iterraw(self): - """Iterates through the last stdout, and returns the lines - exactly as found. + def __init__(self, args, stdin=None, stdout=None, stderr=None, preexec_fn=None, + env=None, universal_newlines=False): + """Popen replacement for running commands in teed psuedo-terminal. This + allows the capturing AND streaming of stdout and stderr. Availability + is Linux-only. 
""" - # get appropriate handles - spec = self.spec - proc = self.proc - if proc is None: - return - timeout = builtins.__xonsh__.env.get("XONSH_PROC_FREQUENCY") - # get the correct stdout - stdout = proc.stdout - if ( - stdout is None or spec.stdout is None or not safe_readable(stdout) - ) and spec.captured_stdout is not None: - stdout = spec.captured_stdout - if hasattr(stdout, "buffer"): - stdout = stdout.buffer - if stdout is not None and not isinstance(stdout, self.nonblocking): - stdout = NonBlockingFDReader(stdout.fileno(), timeout=timeout) - if ( - not stdout - or self.captured == "stdout" - or not safe_readable(stdout) - or not spec.threadable - ): - # we get here if the process is not threadable or the - # class is the real Popen - PrevProcCloser(pipeline=self) - task = wait_for_active_job() - if task is None or task["status"] != "stopped": - proc.wait() - self._endtime() - if self.captured == "object": - self.end(tee_output=False) - elif self.captured == "hiddenobject" and stdout: - b = stdout.read() - lines = b.splitlines(keepends=True) - yield from lines - self.end(tee_output=False) - elif self.captured == "stdout": - b = stdout.read() - s = self._decode_uninew(b, universal_newlines=True) - self.lines = s.splitlines(keepends=True) - return - # get the correct stderr - stderr = proc.stderr - if ( - stderr is None or spec.stderr is None or not safe_readable(stderr) - ) and spec.captured_stderr is not None: - stderr = spec.captured_stderr - if hasattr(stderr, "buffer"): - stderr = stderr.buffer - if stderr is not None and not isinstance(stderr, self.nonblocking): - stderr = NonBlockingFDReader(stderr.fileno(), timeout=timeout) - # read from process while it is running - check_prev_done = len(self.procs) == 1 - prev_end_time = None - i = j = cnt = 1 - while proc.poll() is None: - if getattr(proc, "suspended", False): - return - elif getattr(proc, "in_alt_mode", False): - time.sleep(0.1) # probably not leaving any time soon - continue - elif not check_prev_done: - # In the case of pipelines with more than one command - # we should give the commands a little time - # to start up fully. This is particularly true for - # GNU Parallel, which has a long startup time. - pass - elif self._prev_procs_done(): - self._close_prev_procs() - proc.prevs_are_closed = True - break - stdout_lines = safe_readlines(stdout, 1024) - i = len(stdout_lines) - if i != 0: - yield from stdout_lines - stderr_lines = safe_readlines(stderr, 1024) - j = len(stderr_lines) - if j != 0: - self.stream_stderr(stderr_lines) - if not check_prev_done: - # if we are piping... - if stdout_lines or stderr_lines: - # see if we have some output. - check_prev_done = True - elif prev_end_time is None: - # or see if we already know that the next-to-last - # proc in the pipeline has ended. - if self._prev_procs_done(): - # if it has, record the time - prev_end_time = time.time() - elif time.time() - prev_end_time >= 0.1: - # if we still don't have any output, even though the - # next-to-last proc has finished, wait a bit to make - # sure we have fully started up, etc. 
- check_prev_done = True - # this is for CPU usage - if i + j == 0: - cnt = min(cnt + 1, 1000) - else: - cnt = 1 - time.sleep(timeout * cnt) - # read from process now that it is over - yield from safe_readlines(stdout) - self.stream_stderr(safe_readlines(stderr)) - proc.wait() - self._endtime() - yield from safe_readlines(stdout) - self.stream_stderr(safe_readlines(stderr)) - if self.captured == "object": - self.end(tee_output=False) - - def itercheck(self): - """Iterates through the command lines and throws an error if the - returncode is non-zero. - """ - yield from self - if self.returncode: - # self is included, as it provides access to stderr and other details - # useful when the instance isn't assigned to a variable in the shell. - raise XonshCalledProcessError( - self.returncode, self.executed_cmd, self.stdout, self.stderr, self - ) - - def tee_stdout(self): - """Writes the process stdout to the output variable, line-by-line, and - yields each line. This may optionally accept lines (in bytes) to iterate - over, in which case it does not call iterraw(). - """ - env = builtins.__xonsh__.env - enc = env.get("XONSH_ENCODING") - err = env.get("XONSH_ENCODING_ERRORS") - lines = self.lines - stream = self.captured not in STDOUT_CAPTURE_KINDS - if stream and not self.spec.stdout: - stream = False - stdout_has_buffer = hasattr(sys.stdout, "buffer") - nl = b"\n" - cr = b"\r" - crnl = b"\r\n" - for line in self.iterraw(): - # write to stdout line ASAP, if needed - if stream: - if stdout_has_buffer: - sys.stdout.buffer.write(line) - else: - sys.stdout.write(line.decode(encoding=enc, errors=err)) - sys.stdout.flush() - # do some munging of the line before we return it - if line.endswith(crnl): - line = line[:-2] + nl - elif line.endswith(cr): - line = line[:-1] + nl - line = RE_HIDE_ESCAPE.sub(b"", line) - line = line.decode(encoding=enc, errors=err) - # tee it up! - lines.append(line) - yield line - - def stream_stderr(self, lines): - """Streams lines to sys.stderr and the errors attribute.""" - if not lines: - return - env = builtins.__xonsh__.env - enc = env.get("XONSH_ENCODING") - err = env.get("XONSH_ENCODING_ERRORS") - b = b"".join(lines) - if self.stderr_prefix: - b = self.stderr_prefix + b - if self.stderr_postfix: - b += self.stderr_postfix - stderr_has_buffer = hasattr(sys.stderr, "buffer") - # write bytes to std stream - if stderr_has_buffer: - sys.stderr.buffer.write(b) - else: - sys.stderr.write(b.decode(encoding=enc, errors=err)) - sys.stderr.flush() - # do some munging of the line before we save it to the attr - b = b.replace(b"\r\n", b"\n").replace(b"\r", b"\n") - b = RE_HIDE_ESCAPE.sub(b"", b) - env = builtins.__xonsh__.env - s = b.decode( - encoding=env.get("XONSH_ENCODING"), errors=env.get("XONSH_ENCODING_ERRORS") - ) - # set the errors - if self.errors is None: - self.errors = s - else: - self.errors += s - - def _decode_uninew(self, b, universal_newlines=None): - """Decode bytes into a str and apply universal newlines as needed.""" - if not b: - return "" - if isinstance(b, bytes): - env = builtins.__xonsh__.env - s = b.decode( - encoding=env.get("XONSH_ENCODING"), - errors=env.get("XONSH_ENCODING_ERRORS"), - ) - else: - s = b - if universal_newlines or self.spec.universal_newlines: - s = s.replace("\r\n", "\n").replace("\r", "\n") - return s - - # - # Ending methods - # - - def end(self, tee_output=True): - """ - End the pipeline, return the controlling terminal if needed. - - Main things done in self._end().
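itercheck's stream-then-raise pattern can be sketched with plain subprocess standing in for the pipeline object (itercheck here is a hypothetical standalone helper, not xonsh API):

import subprocess

def itercheck(cmd):
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                            universal_newlines=True)
    yield from proc.stdout          # stream lines as they arrive
    if proc.wait() != 0:            # then raise once the exit status is known
        raise subprocess.CalledProcessError(proc.returncode, cmd)

for line in itercheck(["echo", "ok"]):
    print(line, end="")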
- """ - if self.ended: - return - self._end(tee_output=tee_output) - self._return_terminal() - - def _end(self, tee_output): - """Waits for the command to complete and then runs any closing and - cleanup procedures that need to be run. - """ - if tee_output: - for _ in self.tee_stdout(): - pass - self._endtime() - # since we are driven by getting output, input may not be available - # until the command has completed. - self._set_input() - self._close_prev_procs() - self._close_proc() - self._check_signal() - self._apply_to_history() - self.ended = True - self._raise_subproc_error() - - def _return_terminal(self): - if ON_WINDOWS or not ON_POSIX: - return - pgid = os.getpgid(0) - if self.term_pgid is None or pgid == self.term_pgid: - return - if give_terminal_to(pgid): # if gave term succeed - self.term_pgid = pgid - if builtins.__xonsh__.shell is not None: - # restoring sanity could probably be called whenever we return - # control to the shell. But it only seems to matter after a - # ^Z event. This *has* to be called after we give the terminal - # back to the shell. - builtins.__xonsh__.shell.shell.restore_tty_sanity() - - def resume(self, job, tee_output=True): - self.ended = False - if give_terminal_to(job["pgrp"]): - self.term_pgid = job["pgrp"] - _continue(job) - self.end(tee_output=tee_output) - - def _endtime(self): - """Sets the closing timestamp if it hasn't been already.""" - if self.endtime is None: - self.endtime = time.time() - - def _safe_close(self, handle): - safe_fdclose(handle, cache=self._closed_handle_cache) - - def _prev_procs_done(self): - """Boolean for if all previous processes have completed. If there - is only a single process in the pipeline, this returns False. - """ - any_running = False - for s, p in zip(self.specs[:-1], self.procs[:-1]): - if p.poll() is None: - any_running = True - continue - self._safe_close(s.stdin) - self._safe_close(s.stdout) - self._safe_close(s.stderr) - if p is None: - continue - self._safe_close(p.stdin) - self._safe_close(p.stdout) - self._safe_close(p.stderr) - return False if any_running else (len(self) > 1) - - def _close_prev_procs(self): - """Closes all but the last proc's stdout.""" - for s, p in zip(self.specs[:-1], self.procs[:-1]): - self._safe_close(s.stdin) - self._safe_close(s.stdout) - self._safe_close(s.stderr) - if p is None: - continue - self._safe_close(p.stdin) - self._safe_close(p.stdout) - self._safe_close(p.stderr) - - def _close_proc(self): - """Closes last proc's stdout.""" - s = self.spec - p = self.proc - self._safe_close(s.stdin) - self._safe_close(s.stdout) - self._safe_close(s.stderr) - self._safe_close(s.captured_stdout) - self._safe_close(s.captured_stderr) - if p is None: - return - self._safe_close(p.stdin) - self._safe_close(p.stdout) - self._safe_close(p.stderr) - - def _set_input(self): - """Sets the input variable.""" - if self.proc is None: - return - stdin = self.proc.stdin - if ( - stdin is None - or isinstance(stdin, int) - or stdin.closed - or not stdin.seekable() - or not safe_readable(stdin) - ): - input = b"" - else: - stdin.seek(0) - input = stdin.read() - self.input = self._decode_uninew(input) - - def _check_signal(self): - """Checks if a signal was received and issues a message.""" - proc_signal = getattr(self.proc, "signal", None) - if proc_signal is None: - return - sig, core = proc_signal - sig_str = SIGNAL_MESSAGES.get(sig) - if sig_str: - if core: - sig_str += " (core dumped)" - print(sig_str, file=sys.stderr) - if self.errors is not None: - self.errors += sig_str + "\n" - - def 
_apply_to_history(self): - """Applies the results to the current history object.""" - hist = builtins.__xonsh__.history - if hist is not None: - hist.last_cmd_rtn = 1 if self.proc is None else self.proc.returncode - - def _raise_subproc_error(self): - """Raises a subprocess error, if we are supposed to.""" - spec = self.spec - rtn = self.returncode - if ( - rtn is not None - and rtn > 0 - and builtins.__xonsh__.env.get("RAISE_SUBPROC_ERROR") - ): - try: - raise subprocess.CalledProcessError(rtn, spec.args, output=self.output) - finally: - # this is needed to get a working terminal in interactive mode - self._return_terminal() - - # - # Properties - # - - @property - def stdin(self): - """Process stdin.""" - return self.proc.stdin - - @property - def stdout(self): - """Process stdout.""" - return self.proc.stdout - - @property - def stderr(self): - """Process stderr.""" - return self.proc.stderr - - @property - def inp(self): - """Creates normalized input string from args.""" - return " ".join(self.args) - - @property - def output(self): - """Non-blocking, lazy access to output""" - if self.ended: - if self._output is None: - self._output = "".join(self.lines) - return self._output - else: - return "".join(self.lines) - - @property - def out(self): - """Output value as a str.""" - self.end() - return self.output - - @property - def err(self): - """Error messages as a string.""" - self.end() - return self.errors + self.stdin = stdin + self._stdout = stdout + self._stderr = stderr + self.args = args + self.universal_newlines = universal_newlines + xenv = builtins.__xonsh_env__ if hasattr(builtins, '__xonsh_env__') \ + else {'XONSH_ENCODING': 'utf-8', + 'XONSH_ENCODING_ERRORS': 'strict'} + + if not os.access(args[0], os.F_OK): + raise FileNotFoundError('command {0!r} not found'.format(args[0])) + elif not os.access(args[0], os.X_OK) or os.path.isdir(args[0]): + raise PermissionError('permission denied: {0!r}'.format(args[0])) + self._tpty = tpty = TeePTY(encoding=xenv.get('XONSH_ENCODING'), + errors=xenv.get('XONSH_ENCODING_ERRORS')) + if preexec_fn is not None: + preexec_fn() + delay = xenv.get('TEEPTY_PIPE_DELAY') + tpty.spawn(args, env=env, stdin=stdin, delay=delay) @property def pid(self): - """Process identifier.""" - return self.proc.pid + """The pid of the spawned process.""" + return self._tpty.pid @property def returncode(self): - """Process return code, waits until command is completed.""" - self.end() - if self.proc is None: - return 1 - return self.proc.returncode - - rtn = returncode - - @property - def args(self): - """Arguments to the process.""" - return self.spec.args - - @property - def rtn(self): - """Alias to return code.""" - return self.returncode - - @property - def alias(self): - """Alias that the process used.""" - return self.spec.alias + """The return code of the spawned process.""" + return self._tpty.returncode - @property - def stdin_redirect(self): - """Redirection used for stdin.""" - stdin = self.spec.stdin - name = getattr(stdin, "name", "") - mode = getattr(stdin, "mode", "r") - return [name, mode] - - @property - def stdout_redirect(self): - """Redirection used for stdout.""" - stdout = self.spec.stdout - name = getattr(stdout, "name", "") - mode = getattr(stdout, "mode", "a") - return [name, mode] - - @property - def stderr_redirect(self): - """Redirection used for stderr.""" - stderr = self.spec.stderr - name = getattr(stderr, "name", "") - mode = getattr(stderr, "mode", "r") - return [name, mode] - - @property - def timestamps(self): - """The start and end time
stamps.""" - return [self.starttime, self.endtime] - - @property - def executed_cmd(self): - """The resolved and executed command.""" - return self.spec.cmd + def poll(self): + """Polls the spawned process and returns the returncode.""" + return self._tpty.returncode - @property - def stderr_prefix(self): - """Prefix to print in front of stderr, as bytes.""" - p = self._stderr_prefix - if p is None: - env = builtins.__xonsh__.env - t = env.get("XONSH_STDERR_PREFIX") - s = format_std_prepost(t, env=env) - p = s.encode( - encoding=env.get("XONSH_ENCODING"), - errors=env.get("XONSH_ENCODING_ERRORS"), - ) - self._stderr_prefix = p - return p + def wait(self, timeout=None): + """Waits for the spawned process to finish, up to a timeout.""" + tpty = self._tpty + t0 = time.time() + while tpty.returncode is None: + if timeout is not None and timeout < (time.time() - t0): + raise TimeoutExpired + return tpty.returncode @property - def stderr_postfix(self): - """Postfix to print after stderr, as bytes.""" - p = self._stderr_postfix - if p is None: - env = builtins.__xonsh__.env - t = env.get("XONSH_STDERR_POSTFIX") - s = format_std_prepost(t, env=env) - p = s.encode( - encoding=env.get("XONSH_ENCODING"), - errors=env.get("XONSH_ENCODING_ERRORS"), - ) - self._stderr_postfix = p - return p - - -class HiddenCommandPipeline(CommandPipeline): - def __repr__(self): - return "" - - -def pause_call_resume(p, f, *args, **kwargs): - """For a process p, this will call a function f with the remaining args - and kwargs. If the process cannot accept signals, the function will simply - be called without pausing. - - Parameters - ---------- - p : Popen object or similar - f : callable - args : remaining arguments - kwargs : keyword arguments - """ - can_send_signal = ( - hasattr(p, "send_signal") and ON_POSIX and not ON_MSYS and not ON_CYGWIN - ) - if can_send_signal: - try: - p.send_signal(signal.SIGSTOP) - except PermissionError: - pass - try: - f(*args, **kwargs) - except Exception: - pass - if can_send_signal: - p.send_signal(signal.SIGCONT) - - -class PrevProcCloser(threading.Thread): - """Previous process closer thread for pipelines whose last command - is itself unthreadable. This makes sure that the pipeline is - driven forward and does not deadlock. - """ - - def __init__(self, pipeline): - """ - Parameters - ---------- - pipeline : CommandPipeline - The pipeline whose prev procs we should close. + def stdout(self): + """The stdout (and stderr) that was tee'd into a buffer by the pseudo-terminal.
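The tee'd pseudo-terminal idea that TeePTYProc wraps can be approximated with the stdlib pty module (POSIX-only sketch; TeePTY itself is xonsh's richer implementation with encodings and pipe delays):

import os
import pty

captured = bytearray()

def master_read(fd):
    data = os.read(fd, 1024)
    captured.extend(data)  # keep a copy for later inspection...
    return data            # ...while pty.spawn still streams it to our stdout

pty.spawn(["echo", "hello"], master_read)
print(bytes(captured))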
- if pipeline._prev_procs_done(): - # if it has, record the time - prev_end_time = time.time() - elif time.time() - prev_end_time >= 0.1: - # if we still don't have any output, even though the - # next-to-last proc has finished, wait a bit to make - # sure we have fully started up, etc. - check_prev_done = True - # this is for CPU usage - time.sleep(sleeptime) + if self._stdout is not None: + pass + elif self.universal_newlines: + self._stdout = io.StringIO(str(self._tpty)) + self._stdout.seek(0) + else: + self._stdout = self._tpty.buffer + return self._stdout diff --git a/xonsh/prompt/__init__.py b/xonsh/prompt/__init__.py deleted file mode 100644 index 912f670..0000000 --- a/xonsh/prompt/__init__.py +++ /dev/null @@ -1,29 +0,0 @@ -# amalgamate exclude -import os as _os - -if _os.getenv("XONSH_DEBUG", ""): - pass -else: - import sys as _sys - - try: - from xonsh.prompt import __amalgam__ - - cwd = __amalgam__ - _sys.modules["xonsh.prompt.cwd"] = __amalgam__ - env = __amalgam__ - _sys.modules["xonsh.prompt.env"] = __amalgam__ - gitstatus = __amalgam__ - _sys.modules["xonsh.prompt.gitstatus"] = __amalgam__ - job = __amalgam__ - _sys.modules["xonsh.prompt.job"] = __amalgam__ - vc = __amalgam__ - _sys.modules["xonsh.prompt.vc"] = __amalgam__ - base = __amalgam__ - _sys.modules["xonsh.prompt.base"] = __amalgam__ - del __amalgam__ - except ImportError: - pass - del _sys -del _os -# amalgamate end diff --git a/xonsh/prompt/base.py b/xonsh/prompt/base.py deleted file mode 100644 index cdef679..0000000 --- a/xonsh/prompt/base.py +++ /dev/null @@ -1,219 +0,0 @@ -# -*- coding: utf-8 -*- -"""Base prompt, provides PROMPT_FIELDS and prompt related functions""" - -import builtins -import itertools -import os -import re -import socket -import sys - -import xonsh.lazyasd as xl -import xonsh.tools as xt -import xonsh.platform as xp - -from xonsh.prompt.cwd import ( - _collapsed_pwd, - _replace_home_cwd, - _dynamically_collapsed_pwd, -) -from xonsh.prompt.job import _current_job -from xonsh.prompt.env import env_name, vte_new_tab_cwd -from xonsh.prompt.vc import current_branch, branch_color, branch_bg_color -from xonsh.prompt.gitstatus import gitstatus_prompt - - -@xt.lazyobject -def DEFAULT_PROMPT(): - return default_prompt() - - -class PromptFormatter: - """Class that holds all the related prompt formatting methods, - uses the ``PROMPT_FIELDS`` envvar (no color formatting). 
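PromptFormatter (below) walks templates with a string.Formatter-style parse, the same stdlib machinery xt.FORMATTER wraps; the tuple shape it iterates over looks like this:

import string

template = "{GREEN}{cwd} {prompt_end}{NO_COLOR} "
for literal, field, spec, conv in string.Formatter().parse(template):
    # e.g. ('', 'GREEN', '', None), ('', 'cwd', '', None), (' ', ...)
    print(repr(literal), field, repr(spec), conv)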
- """ - - def __init__(self): - self.cache = {} - - def __call__(self, template=DEFAULT_PROMPT, fields=None): - """Formats a xonsh prompt template string.""" - if fields is None: - self.fields = builtins.__xonsh__.env.get("PROMPT_FIELDS", PROMPT_FIELDS) - else: - self.fields = fields - try: - prompt = self._format_prompt(template=template) - except Exception: - return _failover_template_format(template) - # keep cache only during building prompt - self.cache.clear() - return prompt - - def _format_prompt(self, template=DEFAULT_PROMPT): - template = template() if callable(template) else template - toks = [] - for literal, field, spec, conv in xt.FORMATTER.parse(template): - toks.append(literal) - entry = self._format_field(field, spec, conv) - if entry is not None: - toks.append(entry) - return "".join(toks) - - def _format_field(self, field, spec, conv): - if field is None: - return - elif field.startswith("$"): - val = builtins.__xonsh__.env[field[1:]] - return _format_value(val, spec, conv) - elif field in self.fields: - val = self._get_field_value(field) - return _format_value(val, spec, conv) - else: - # color or unknown field, return as is - return "{" + field + "}" - - def _get_field_value(self, field): - field_value = self.fields[field] - if field_value in self.cache: - return self.cache[field_value] - try: - value = field_value() if callable(field_value) else field_value - self.cache[field_value] = value - except Exception: - print("prompt: error: on field {!r}" "".format(field), file=sys.stderr) - xt.print_exception() - value = "(ERROR:{})".format(field) - return value - - -@xl.lazyobject -def PROMPT_FIELDS(): - return dict( - user=xp.os_environ.get("USERNAME" if xp.ON_WINDOWS else "USER", ""), - prompt_end="#" if xt.is_superuser() else "$", - hostname=socket.gethostname().split(".", 1)[0], - cwd=_dynamically_collapsed_pwd, - cwd_dir=lambda: os.path.dirname(_replace_home_cwd()), - cwd_base=lambda: os.path.basename(_replace_home_cwd()), - short_cwd=_collapsed_pwd, - curr_branch=current_branch, - branch_color=branch_color, - branch_bg_color=branch_bg_color, - current_job=_current_job, - env_name=env_name, - env_prefix="(", - env_postfix=") ", - vte_new_tab_cwd=vte_new_tab_cwd, - gitstatus=gitstatus_prompt, - ) - - -def default_prompt(): - return ('{RED}{user} ' - '{BOLD_WHITE}at ' - '{YELLOW}{hostname} ' - '{BOLD_WHITE}in ' - '{GREEN}{cwd} ' - '{BOLD_WHITE}on ' - '{branch_color}{curr_branch} ' - '{BOLD_WHITE}\n' - '${NO_COLOR} ') - - -def _failover_template_format(template): - if callable(template): - try: - # Exceptions raises from function of producing $PROMPT - # in user's xonshrc should not crash xonsh - return template() - except Exception: - xt.print_exception() - return "$ " - return template - - -@xt.lazyobject -def RE_HIDDEN(): - return re.compile("\001.*?\002") - - -def multiline_prompt(curr=""): - """Returns the filler text for the prompt in multiline scenarios.""" - line = curr.rsplit("\n", 1)[1] if "\n" in curr else curr - line = RE_HIDDEN.sub("", line) # gets rid of colors - # most prompts end in whitespace, head is the part before that. 
- head = line.rstrip() - headlen = len(head) - # tail is the trailing whitespace - tail = line if headlen == 0 else line.rsplit(head[-1], 1)[1] - # now to construct the actual string - dots = builtins.__xonsh__.env.get("MULTILINE_PROMPT") - dots = dots() if callable(dots) else dots - if dots is None or len(dots) == 0: - return "" - tokstr = xt.format_color(dots, hide=True) - baselen = 0 - basetoks = [] - for x in tokstr.split("\001"): - pre, sep, post = x.partition("\002") - if len(sep) == 0: - basetoks.append(("", pre)) - baselen += len(pre) - else: - basetoks.append(("\001" + pre + "\002", post)) - baselen += len(post) - if baselen == 0: - return xt.format_color("{NO_COLOR}" + tail, hide=True) - toks = basetoks * (headlen // baselen) - n = headlen % baselen - count = 0 - for tok in basetoks: - slen = len(tok[1]) - newcount = slen + count - if slen == 0: - continue - elif newcount <= n: - toks.append(tok) - else: - toks.append((tok[0], tok[1][: n - count])) - count = newcount - if n <= count: - break - toks.append((xt.format_color("{NO_COLOR}", hide=True), tail)) - rtn = "".join(itertools.chain.from_iterable(toks)) - return rtn - - -def is_template_string(template, PROMPT_FIELDS=None): - """Returns whether or not the string is a valid template.""" - template = template() if callable(template) else template - try: - included_names = set(i[1] for i in xt.FORMATTER.parse(template)) - except ValueError: - return False - included_names.discard(None) - if PROMPT_FIELDS is None: - fmtter = builtins.__xonsh__.env.get("PROMPT_FIELDS", PROMPT_FIELDS) - else: - fmtter = PROMPT_FIELDS - known_names = set(fmtter.keys()) - return included_names <= known_names - - -def _format_value(val, spec, conv): - """Formats a value from a template string {val!conv:spec}. The spec is - applied as a format string itself, but if the value is None, the result - will be empty. The purpose of this is to allow optional parts in a - prompt string. For example, if the prompt contains '{current_job:{} | }', - and 'current_job' returns 'sleep', the result is 'sleep | ', and if - 'current_job' returns None, the result is ''. - """ - if val is None: - return "" - val = xt.FORMATTER.convert_field(val, conv) - if spec: - val = xt.FORMATTER.format(spec, val) - if not isinstance(val, str): - val = str(val) - return val diff --git a/xonsh/prompt/cwd.py b/xonsh/prompt/cwd.py deleted file mode 100644 index 59e00f1..0000000 --- a/xonsh/prompt/cwd.py +++ /dev/null @@ -1,91 +0,0 @@ -# -*- coding: utf-8 -*- -"""CWD related prompt formatter""" - -import os -import shutil -import builtins - -import xonsh.tools as xt -import xonsh.platform as xp - - -def _replace_home(x): - if xp.ON_WINDOWS: - home = ( - builtins.__xonsh__.env["HOMEDRIVE"] + builtins.__xonsh__.env["HOMEPATH"][0] - ) - if x.startswith(home): - x = x.replace(home, "~", 1) - - if builtins.__xonsh__.env.get("FORCE_POSIX_PATHS"): - x = x.replace(os.sep, os.altsep) - - return x - else: - home = builtins.__xonsh__.env["HOME"] - if x.startswith(home): - x = x.replace(home, "~", 1) - return x - - -def _replace_home_cwd(): - return _replace_home(builtins.__xonsh__.env["PWD"]) - - -def _collapsed_pwd(): - sep = xt.get_sep() - pwd = _replace_home_cwd().split(sep) - l = len(pwd) - leader = sep if l > 0 and len(pwd[0]) == 0 else "" - base = [ - i[0] if ix != l - 1 and i[0] != "." else i[0:2] if ix != l - 1 else i - for ix, i in enumerate(pwd) - if len(i) > 0 - ] - return leader + sep.join(base) - - -def _dynamically_collapsed_pwd(): - """Return the compact current working directory. 
It respects the - environment variable DYNAMIC_CWD_WIDTH. - """ - original_path = _replace_home_cwd() - target_width, units = builtins.__xonsh__.env["DYNAMIC_CWD_WIDTH"] - elision_char = builtins.__xonsh__.env["DYNAMIC_CWD_ELISION_CHAR"] - if target_width == float("inf"): - return original_path - if units == "%": - cols, _ = shutil.get_terminal_size() - target_width = (cols * target_width) // 100 - sep = xt.get_sep() - pwd = original_path.split(sep) - last = pwd.pop() - remaining_space = target_width - len(last) - # Reserve space for separators - remaining_space_for_text = remaining_space - len(pwd) - parts = [] - for i in range(len(pwd)): - part = pwd[i] - part_len = int( - min(len(part), max(1, remaining_space_for_text // (len(pwd) - i))) - ) - remaining_space_for_text -= part_len - if len(part) > part_len: - reduced_part = part[0 : part_len - len(elision_char)] + elision_char - parts.append(reduced_part) - else: - parts.append(part) - parts.append(last) - full = sep.join(parts) - truncature_char = elision_char if elision_char else "..." - # If, even displaying one letter per dir, we are too long - if len(full) > target_width: - # We truncate the leftmost part - full = truncature_char + full[int(-target_width) + len(truncature_char) :] - # if there is not even a single separator we still - # want to display at least the beginning of the directory - if full.find(sep) == -1: - full = (truncature_char + sep + last)[ - 0 : int(target_width) - len(truncature_char) - ] + truncature_char - return full diff --git a/xonsh/prompt/env.py b/xonsh/prompt/env.py deleted file mode 100644 index 7b5267e..0000000 --- a/xonsh/prompt/env.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -"""Prompt formatter for virtualenv and others""" - -import os -import builtins - -import xonsh.platform as xp - - -def find_env_name(): - """Finds the current environment name from $VIRTUAL_ENV or - $CONDA_DEFAULT_ENV if that is set. - """ - env_path = builtins.__xonsh__.env.get("VIRTUAL_ENV", "") - if len(env_path) == 0 and xp.ON_ANACONDA: - env_path = builtins.__xonsh__.env.get("CONDA_DEFAULT_ENV", "") - env_name = os.path.basename(env_path) - return env_name - - -def env_name(): - """Returns the current env_name if it is non-empty, surrounded by the - ``{env_prefix}`` and ``{env_postfix}`` fields. - """ - env_name = find_env_name() - if ( - builtins.__xonsh__.env.get("VIRTUAL_ENV_DISABLE_PROMPT") - or not env_name - ): - # env name prompt printing disabled, or no environment; just return - return - - venv_prompt = builtins.__xonsh__.env.get("VIRTUAL_ENV_PROMPT") - if venv_prompt is not None: - return venv_prompt - else: - pf = builtins.__xonsh__.shell.prompt_formatter - pre = pf._get_field_value("env_prefix") - post = pf._get_field_value("env_postfix") - return pre + env_name + post - - -def vte_new_tab_cwd(): - """This prints an escape sequence that tells VTE terminals the hostname - and pwd. This should not be needed in most cases, but sometimes is for - certain Linux terminals that do not read the PWD from the environment - on startup. Note that this does not return a string, it simply prints - and flushes the escape sequence to stdout directly.
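A worked toy version of the directory-collapsing idea in _collapsed_pwd above (simplified sketch; the real _dynamically_collapsed_pwd also budgets widths and elides with a configurable character):

def collapse(path, sep="/"):
    # keep the last component whole, shorten the rest to their first letter
    parts = path.split(sep)
    return sep.join([p[:1] for p in parts[:-1]] + parts[-1:])

print(collapse("~/projects/xonsh/xonsh"))  # -> ~/p/x/xonsh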
- """ - env = builtins.__xonsh__.env - t = "\033]7;file://{}{}\007" - s = t.format(env.get("HOSTNAME"), env.get("PWD")) - print(s, end="", flush=True) diff --git a/xonsh/prompt/gitstatus.py b/xonsh/prompt/gitstatus.py deleted file mode 100644 index 5dcde7f..0000000 --- a/xonsh/prompt/gitstatus.py +++ /dev/null @@ -1,199 +0,0 @@ -# -*- coding: utf-8 -*- -"""Informative git status prompt formatter""" - -import builtins -import collections -import os -import subprocess - -import xonsh.lazyasd as xl - - -GitStatus = collections.namedtuple( - "GitStatus", - [ - "branch", - "num_ahead", - "num_behind", - "untracked", - "changed", - "conflicts", - "staged", - "stashed", - "operations", - ], -) - - -def _check_output(*args, **kwargs): - kwargs.update( - dict( - env=builtins.__xonsh__.env.detype(), - stdout=subprocess.PIPE, - stderr=subprocess.DEVNULL, - universal_newlines=True, - ) - ) - timeout = builtins.__xonsh__.env["VC_BRANCH_TIMEOUT"] - # See https://docs.python.org/3/library/subprocess.html#subprocess.Popen.communicate - with subprocess.Popen(*args, **kwargs) as proc: - try: - out, err = proc.communicate(timeout=timeout) - if proc.returncode != 0: - raise subprocess.CalledProcessError( - proc.returncode, proc.args, output=out, stderr=err - ) # note err will always be empty as we redirect stderr to DEVNULL abvoe - return out - except subprocess.TimeoutExpired: - # We use `.terminate()` (SIGTERM) instead of `.kill()` (SIGKILL) here - # because otherwise we guarantee that a `.git/index.lock` file will be - # left over, and subsequent git operations will fail. - # We don't want that. - # As a result, we must rely on git to exit properly on SIGTERM. - proc.terminate() - # We wait() to ensure that git has finished before the next - # `gitstatus` prompt is rendered (otherwise `index.lock` still exists, - # and it will fail). - # We don't technically have to call `wait()` here as the - # `with subprocess.Popen()` context manager above would do that - # for us, but we do it to be explicit that waiting is being done. 
- proc.wait() # we ignore what git says after we sent it SIGTERM - raise - - -@xl.lazyobject -def _DEFS(): - DEFS = { - "HASH": ":", - "BRANCH": "{CYAN}", - "OPERATION": "{CYAN}", - "STAGED": "{RED}●", - "CONFLICTS": "{RED}×", - "CHANGED": "{BLUE}+", - "UNTRACKED": "…", - "STASHED": "⚑", - "CLEAN": "{BOLD_GREEN}✓", - "AHEAD": "↑·", - "BEHIND": "↓·", - } - return DEFS - - -def _get_def(key): - def_ = builtins.__xonsh__.env.get("XONSH_GITSTATUS_" + key) - return def_ if def_ is not None else _DEFS[key] - - -def _get_tag_or_hash(): - tag_or_hash = _check_output(["git", "describe", "--always"]).strip() - hash_ = _check_output(["git", "rev-parse", "--short", "HEAD"]).strip() - have_tag_name = tag_or_hash != hash_ - return tag_or_hash if have_tag_name else _get_def("HASH") + hash_ - - -def _get_stash(gitdir): - try: - with open(os.path.join(gitdir, "logs/refs/stash")) as f: - return sum(1 for _ in f) - except IOError: - return 0 - - -def _gitoperation(gitdir): - files = ( - ("rebase-merge", "REBASE"), - ("rebase-apply", "AM/REBASE"), - ("MERGE_HEAD", "MERGING"), - ("CHERRY_PICK_HEAD", "CHERRY-PICKING"), - ("REVERT_HEAD", "REVERTING"), - ("BISECT_LOG", "BISECTING"), - ) - return [f[1] for f in files if os.path.exists(os.path.join(gitdir, f[0]))] - - -def gitstatus(): - """Return namedtuple with fields: - branch name, number of ahead commit, number of behind commit, - untracked number, changed number, conflicts number, - staged number, stashed number, operation.""" - status = _check_output(["git", "status", "--porcelain", "--branch"]) - branch = "" - num_ahead, num_behind = 0, 0 - untracked, changed, conflicts, staged = 0, 0, 0, 0 - for line in status.splitlines(): - if line.startswith("##"): - line = line[2:].strip() - if "Initial commit on" in line: - branch = line.split()[-1] - elif "no branch" in line: - branch = _get_tag_or_hash() - elif "..." 
not in line: - branch = line - else: - branch, rest = line.split("...") - if " " in rest: - divergence = rest.split(" ", 1)[-1] - divergence = divergence.strip("[]") - for div in divergence.split(", "): - if "ahead" in div: - num_ahead = int(div[len("ahead ") :].strip()) - elif "behind" in div: - num_behind = int(div[len("behind ") :].strip()) - elif line.startswith("??"): - untracked += 1 - else: - if len(line) > 1 and line[1] == "M": - changed += 1 - - if len(line) > 0 and line[0] == "U": - conflicts += 1 - elif len(line) > 0 and line[0] != " ": - staged += 1 - - gitdir = _check_output(["git", "rev-parse", "--git-dir"]).strip() - stashed = _get_stash(gitdir) - operations = _gitoperation(gitdir) - - return GitStatus( - branch, - num_ahead, - num_behind, - untracked, - changed, - conflicts, - staged, - stashed, - operations, - ) - - -def gitstatus_prompt(): - """Return str `BRANCH|OPERATOR|numbers`""" - try: - s = gitstatus() - except subprocess.SubprocessError: - return None - - ret = _get_def("BRANCH") + s.branch - if s.num_ahead > 0: - ret += _get_def("AHEAD") + str(s.num_ahead) - if s.num_behind > 0: - ret += _get_def("BEHIND") + str(s.num_behind) - if s.operations: - ret += _get_def("OPERATION") + "|" + "|".join(s.operations) - ret += "|" - if s.staged > 0: - ret += _get_def("STAGED") + str(s.staged) + "{NO_COLOR}" - if s.conflicts > 0: - ret += _get_def("CONFLICTS") + str(s.conflicts) + "{NO_COLOR}" - if s.changed > 0: - ret += _get_def("CHANGED") + str(s.changed) + "{NO_COLOR}" - if s.untracked > 0: - ret += _get_def("UNTRACKED") + str(s.untracked) + "{NO_COLOR}" - if s.stashed > 0: - ret += _get_def("STASHED") + str(s.stashed) + "{NO_COLOR}" - if s.staged + s.conflicts + s.changed + s.untracked + s.stashed == 0: - ret += _get_def("CLEAN") + "{NO_COLOR}" - ret += "{NO_COLOR}" - - return ret diff --git a/xonsh/prompt/job.py b/xonsh/prompt/job.py deleted file mode 100644 index b4d1be4..0000000 --- a/xonsh/prompt/job.py +++ /dev/null @@ -1,15 +0,0 @@ -# -*- coding: utf-8 -*- -"""Prompt formatter for current jobs""" - -import xonsh.jobs as xj - - -def _current_job(): - j = xj.get_next_task() - if j is not None: - if not j["bg"]: - cmd = j["cmds"][-1] - s = cmd[0] - if s == "sudo" and len(cmd) > 1: - s = cmd[1] - return s diff --git a/xonsh/prompt/vc.py b/xonsh/prompt/vc.py deleted file mode 100644 index 42a0e90..0000000 --- a/xonsh/prompt/vc.py +++ /dev/null @@ -1,260 +0,0 @@ -# -*- coding: utf-8 -*- -"""Prompt formatter for simple version control branches""" -# pylint:disable=no-member, invalid-name - -import os -import sys -import queue -import builtins -import threading -import subprocess - -import xonsh.tools as xt - - -def _get_git_branch(q): - denv = builtins.__xonsh__.env.detype() - try: - branches = xt.decode_bytes( - subprocess.check_output( - ["git", "branch"], env=denv, stderr=subprocess.DEVNULL - ) - ).splitlines() - except (subprocess.CalledProcessError, OSError, FileNotFoundError): - q.put(None) - else: - for branch in branches: - if not branch.startswith("* "): - continue - elif branch.endswith(")"): - branch = branch.split()[-1][:-1] - else: - branch = branch.split()[-1] - - q.put(branch) - break - else: - q.put(None) - - -def get_git_branch(): - """Attempts to find the current git branch. If this could not - be determined (timeout, not in a git repo, etc.) then this returns None. 
- """ - branch = None - timeout = builtins.__xonsh__.env.get("VC_BRANCH_TIMEOUT") - q = queue.Queue() - - t = threading.Thread(target=_get_git_branch, args=(q,)) - t.start() - t.join(timeout=timeout) - try: - branch = q.get_nowait() - except queue.Empty: - branch = None - return branch - - -def _get_hg_root(q): - _curpwd = builtins.__xonsh__.env["PWD"] - while True: - if not os.path.isdir(_curpwd): - return False - if any([b.name == ".hg" for b in xt.scandir(_curpwd)]): - q.put(_curpwd) - break - else: - _oldpwd = _curpwd - _curpwd = os.path.split(_curpwd)[0] - if _oldpwd == _curpwd: - return False - - -def get_hg_branch(root=None): - """Try to get the mercurial branch of the current directory, - return None if not in a repo or subprocess.TimeoutExpired if timed out. - """ - env = builtins.__xonsh__.env - timeout = env["VC_BRANCH_TIMEOUT"] - q = queue.Queue() - t = threading.Thread(target=_get_hg_root, args=(q,)) - t.start() - t.join(timeout=timeout) - try: - root = q.get_nowait() - except queue.Empty: - return None - if env.get("VC_HG_SHOW_BRANCH"): - # get branch name - branch_path = os.path.sep.join([root, ".hg", "branch"]) - if os.path.exists(branch_path): - with open(branch_path, "r") as branch_file: - branch = branch_file.read() - else: - branch = "default" - else: - branch = "" - # add bookmark, if we can - bookmark_path = os.path.sep.join([root, ".hg", "bookmarks.current"]) - if os.path.exists(bookmark_path): - with open(bookmark_path, "r") as bookmark_file: - active_bookmark = bookmark_file.read() - if env.get("VC_HG_SHOW_BRANCH") is True: - branch = "{0}, {1}".format( - *(b.strip(os.linesep) for b in (branch, active_bookmark)) - ) - else: - branch = active_bookmark.strip(os.linesep) - else: - branch = branch.strip(os.linesep) - return branch - - -_FIRST_BRANCH_TIMEOUT = True - - -def _first_branch_timeout_message(): - global _FIRST_BRANCH_TIMEOUT - sbtm = builtins.__xonsh__.env["SUPPRESS_BRANCH_TIMEOUT_MESSAGE"] - if not _FIRST_BRANCH_TIMEOUT or sbtm: - return - _FIRST_BRANCH_TIMEOUT = False - print( - "xonsh: branch timeout: computing the branch name, color, or both " - "timed out while formatting the prompt. You may avoid this by " - "increasing the value of $VC_BRANCH_TIMEOUT or by removing branch " - "fields, like {curr_branch}, from your $PROMPT. See the FAQ " - "for more details. This message will be suppressed for the remainder " - "of this session. To suppress this message permanently, set " - "$SUPPRESS_BRANCH_TIMEOUT_MESSAGE = True in your xonshrc file.", - file=sys.stderr, - ) - - -def current_branch(): - """Gets the branch for a current working directory. Returns an empty string - if the cwd is not a repository. This currently only works for git and hg - and should be extended in the future. If a timeout occurred, the string - '' is returned. 
- """ - branch = None - cmds = builtins.__xonsh__.commands_cache - # check for binary only once - if cmds.is_empty(): - has_git = bool(cmds.locate_binary("git", ignore_alias=True)) - has_hg = bool(cmds.locate_binary("hg", ignore_alias=True)) - else: - has_git = bool(cmds.lazy_locate_binary("git", ignore_alias=True)) - has_hg = bool(cmds.lazy_locate_binary("hg", ignore_alias=True)) - if has_git: - branch = get_git_branch() - if not branch and has_hg: - branch = get_hg_branch() - if isinstance(branch, subprocess.TimeoutExpired): - branch = "" - _first_branch_timeout_message() - return branch or None - - -def _git_dirty_working_directory(q, include_untracked): - status = None - denv = builtins.__xonsh__.env.detype() - try: - cmd = ["git", "status", "--porcelain"] - if include_untracked: - cmd.append("--untracked-files=normal") - else: - cmd.append("--untracked-files=no") - status = subprocess.check_output(cmd, stderr=subprocess.DEVNULL, env=denv) - except (subprocess.CalledProcessError, OSError, FileNotFoundError): - q.put(None) - if status is not None: - return q.put(bool(status)) - - -def git_dirty_working_directory(include_untracked=False): - """Returns whether or not the git directory is dirty. If this could not - be determined (timeout, file not found, etc.) then this returns None. - """ - timeout = builtins.__xonsh__.env.get("VC_BRANCH_TIMEOUT") - q = queue.Queue() - t = threading.Thread( - target=_git_dirty_working_directory, args=(q, include_untracked) - ) - t.start() - t.join(timeout=timeout) - try: - return q.get_nowait() - except queue.Empty: - return None - - -def hg_dirty_working_directory(): - """Computes whether or not the mercurial working directory is dirty or not. - If this cannot be determined, None is returned. - """ - env = builtins.__xonsh__.env - cwd = env["PWD"] - denv = env.detype() - vcbt = env["VC_BRANCH_TIMEOUT"] - # Override user configurations settings and aliases - denv["HGRCPATH"] = "" - try: - s = subprocess.check_output( - ["hg", "identify", "--id"], - stderr=subprocess.PIPE, - cwd=cwd, - timeout=vcbt, - universal_newlines=True, - env=denv, - ) - return s.strip(os.linesep).endswith("+") - except ( - subprocess.CalledProcessError, - subprocess.TimeoutExpired, - FileNotFoundError, - ): - return None - - -def dirty_working_directory(): - """Returns a boolean as to whether there are uncommitted files in version - control repository we are inside. If this cannot be determined, returns - None. Currently supports git and hg. - """ - dwd = None - cmds = builtins.__xonsh__.commands_cache - if cmds.lazy_locate_binary("git", ignore_alias=True): - dwd = git_dirty_working_directory() - if cmds.lazy_locate_binary("hg", ignore_alias=True) and dwd is None: - dwd = hg_dirty_working_directory() - return dwd - - -def branch_color(): - """Return red if the current branch is dirty, yellow if the dirtiness can - not be determined, and green if it clean. These are bold, intense colors - for the foreground. - """ - dwd = dirty_working_directory() - if dwd is None: - color = "{YELLOW}" - elif dwd: - color = "{RED}" - else: - color = "{GREEN}" - return color - - -def branch_bg_color(): - """Return red if the current branch is dirty, yellow if the dirtiness can - not be determined, and green if it clean. These are background colors. 
- """ - dwd = dirty_working_directory() - if dwd is None: - color = "{BACKGROUND_YELLOW}" - elif dwd: - color = "{BACKGROUND_RED}" - else: - color = "{BACKGROUND_GREEN}" - return color diff --git a/xonsh/ptk2/completer.py b/xonsh/prompt_toolkit_completer.py similarity index 66% rename from xonsh/ptk2/completer.py rename to xonsh/prompt_toolkit_completer.py index 145bdce..9f9f756 100644 --- a/xonsh/ptk2/completer.py +++ b/xonsh/prompt_toolkit_completer.py @@ -1,7 +1,5 @@ -# -*- coding: utf-8 -*- """Completer implementation to use with prompt_toolkit.""" - -from prompt_toolkit.completion import Completer +from prompt_toolkit.completion import Completer, Completion from gitsome.completer import CompleterGitsome @@ -12,18 +10,15 @@ class PromptToolkitCompleter(Completer): It just redirects requests to normal Xonsh completer. """ - def __init__(self, completer, ctx, shell): - """Takes instance of xonsh.completer.Completer, the xonsh execution - context, and the shell instance itself. - """ + def __init__(self, completer, ctx): + """Takes instance of xonsh.completer.Completer and dict with context.""" self.completer = completer - self.ctx = ctx - self.shell = shell self.completer_gitsome = CompleterGitsome() + self.ctx = ctx def get_completions(self, document, complete_event): """Returns a generator for list of completions.""" - line = document.current_line.lstrip() + line = document.current_line endidx = document.cursor_position_col space_pos = document.find_backwards(' ') if space_pos is None: @@ -31,11 +26,11 @@ def get_completions(self, document, complete_event): else: begidx = space_pos + endidx + 1 prefix = line[begidx:endidx] - completions, dummy = self.completer.complete(prefix, - line, - begidx, - endidx, - self.ctx) + completions = self.completer.complete(prefix, + line, + begidx, + endidx, + self.ctx) completions_gitsome = \ self.completer_gitsome.get_completions(document, complete_event) diff --git a/xonsh/prompt_toolkit_history.py b/xonsh/prompt_toolkit_history.py new file mode 100644 index 0000000..f1895ce --- /dev/null +++ b/xonsh/prompt_toolkit_history.py @@ -0,0 +1,101 @@ +"""History object for use with prompt_toolkit.""" +import os + +from prompt_toolkit.history import History + + +def load_file_into_list(store, filename): + """Load content of file filename into list store.""" + if os.path.exists(filename): + with open(filename, 'rb') as hfile: + for line in hfile: + line = line.decode('utf-8') + # Drop trailing newline + store.append(line[:-1]) + + +class LimitedFileHistory(History): + + """History class that keeps entries in file with limit on number of those. + + It handles only one-line entries. + """ + + def __init__(self): + """Initialize history object.""" + self.strings = [] + self.new_entries = [] + self.old_history = [] + + def append(self, entry): + """Append new entry to the history. + + Entry sould be a one-liner. + """ + self.strings.append(entry) + self.new_entries.append(entry) + + def __getitem__(self, index): + return self.strings[index] + + def __len__(self): + return len(self.strings) + + def __iter__(self): + return iter(self.strings) + + def read_history_file(self, filename): + """Read history from given file into memory. + + It first discards all history entries that were read by this function + before, and then tries to read entries from filename as history of + commands that happend before current session. + Entries that were appendend in current session are left unharmed. + + Parameters + ---------- + filename : str + Path to history file. 
+ """ + self.old_history = [] + load_file_into_list(self.old_history, filename) + self.strings = self.old_history[:] + self.strings.extend(self.new_entries) + + def save_history_to_file(self, filename, limit=-1): + """Save history to file. + + It first reads existing history file again, so nothing is overrided. If + combined number of entries from history file and current session + exceeds limit old entries are dropped. + Not thread safe. + + Parameters + ---------- + filename : str + Path to file to save history to. + limit : int + Limit on number of entries in history file. Negative values imply + unlimited history. + """ + def write_list(lst, file_obj): + """Write each element of list as separate lint into file_obj.""" + text = ('\n'.join(lst)) + '\n' + file_obj.write(text.encode('utf-8')) + + if limit < 0: + with open(filename, 'ab') as hfile: + write_list(self.new_entries, hfile) + return + + new_history = [] + load_file_into_list(new_history, filename) + + if len(new_history) + len(self.new_entries) <= limit: + if self.new_entries: + with open(filename, 'ab') as hfile: + write_list(self.new_entries, hfile) + else: + new_history.extend(self.new_entries) + with open(filename, 'wb') as hfile: + write_list(new_history[-limit:], hfile) diff --git a/xonsh/prompt_toolkit_key_bindings.py b/xonsh/prompt_toolkit_key_bindings.py new file mode 100644 index 0000000..dcc0620 --- /dev/null +++ b/xonsh/prompt_toolkit_key_bindings.py @@ -0,0 +1,42 @@ +"""Key bindings for prompt_toolkit xonsh shell.""" +import builtins + +from prompt_toolkit.filters import Filter +from prompt_toolkit.keys import Keys + + +class TabShouldInsertIndentFilter(Filter): + """ + Filter that is intended to check if should insert indent instead of + starting autocompletion. + It basically just checks if there are only whitespaces before the cursor - + if so indent should be inserted, otherwise autocompletion. + """ + def __call__(self, cli): + before_cursor = cli.current_buffer.document.current_line_before_cursor + + return bool(before_cursor.isspace()) + + +def load_xonsh_bindings(key_bindings_manager): + """ + Load custom key bindings. + """ + handle = key_bindings_manager.registry.add_binding + env = builtins.__xonsh_env__ + + @handle(Keys.Tab, filter=TabShouldInsertIndentFilter()) + def _(event): + """ + If there are only whitespaces before current cursor position insert + indent instead of autocompleting. 
+ """ + event.cli.current_buffer.insert_text(env.get('INDENT')) + + @handle(Keys.BackTab) + def insert_literal_tab(event): + """ + Insert literal tab on Shift+Tab instead of autocompleting + """ + event.cli.current_buffer.insert_text(env.get('INDENT')) + diff --git a/xonsh/prompt_toolkit_shell.py b/xonsh/prompt_toolkit_shell.py new file mode 100644 index 0000000..51ecd15 --- /dev/null +++ b/xonsh/prompt_toolkit_shell.py @@ -0,0 +1,126 @@ +"""The prompt_toolkit based xonsh shell""" +import os +import builtins +from warnings import warn + +from prompt_toolkit.shortcuts import get_input +from prompt_toolkit.key_binding.manager import KeyBindingManager +from prompt_toolkit.auto_suggest import AutoSuggestFromHistory +from pygments.token import Token +from pygments.style import Style + +from xonsh.base_shell import BaseShell +from xonsh.tools import format_prompt_for_prompt_toolkit +from xonsh.prompt_toolkit_completer import PromptToolkitCompleter +from xonsh.prompt_toolkit_history import LimitedFileHistory +from xonsh.prompt_toolkit_key_bindings import load_xonsh_bindings + + +def setup_history(): + """Creates history object.""" + env = builtins.__xonsh_env__ + hfile = env.get('XONSH_HISTORY_FILE') + history = LimitedFileHistory() + try: + history.read_history_file(hfile) + except PermissionError: + warn('do not have read permissions for ' + hfile, RuntimeWarning) + return history + + +def teardown_history(history): + """Tears down the history object.""" + env = builtins.__xonsh_env__ + hsize = env.get('XONSH_HISTORY_SIZE')[0] + hfile = env.get('XONSH_HISTORY_FILE') + try: + history.save_history_to_file(hfile, hsize) + except PermissionError: + warn('do not have write permissions for ' + hfile, RuntimeWarning) + + +class PromptToolkitShell(BaseShell): + """The xonsh shell.""" + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.history = setup_history() + self.pt_completer = PromptToolkitCompleter(self.completer, self.ctx) + self.key_bindings_manager = KeyBindingManager( + enable_auto_suggest_bindings=True, + enable_search=True, enable_abort_and_exit_bindings=True) + load_xonsh_bindings(self.key_bindings_manager) + + def __del__(self): + if self.history is not None: + teardown_history(self.history) + + def cmdloop(self, intro=None): + """Enters a loop that reads and execute input from user.""" + if intro: + print(intro) + _auto_suggest = AutoSuggestFromHistory() + while not builtins.__xonsh_exit__: + try: + token_func, style_cls = self._get_prompt_tokens_and_style() + mouse_support = builtins.__xonsh_env__.get('MOUSE_SUPPORT') + if builtins.__xonsh_env__.get('AUTO_SUGGEST'): + auto_suggest = _auto_suggest + else: + auto_suggest = None + line = get_input( + mouse_support=mouse_support, + auto_suggest=auto_suggest, + get_prompt_tokens=token_func, + style=style_cls, + completer=self.pt_completer, + history=self.history, + key_bindings_registry=self.key_bindings_manager.registry, + display_completions_in_columns=False) + if not line: + self.emptyline() + else: + line = self.precmd(line) + self.default(line) + except KeyboardInterrupt: + self.reset_buffer() + except EOFError: + break + + def _get_prompt_tokens_and_style(self): + """Returns function to pass as prompt to prompt_toolkit.""" + token_names, cstyles, strings = format_prompt_for_prompt_toolkit(self.prompt) + tokens = [getattr(Token, n) for n in token_names] + + def get_tokens(cli): + return list(zip(tokens, strings)) + + class CustomStyle(Style): + styles = { + Token.Menu.Completions.Completion.Current: 'bg:#00aaaa 
#000000', + Token.Menu.Completions.Completion: 'bg:#008888 #ffffff', + Token.Menu.Completions.Meta.Current: 'bg:#00aaaa #000000', + Token.Menu.Completions.Meta: 'bg:#00aaaa #ffffff', + Token.Menu.Completions.ProgressButton: 'bg:#003333', + Token.Menu.Completions.ProgressBar: 'bg:#00aaaa', + Token.Toolbar: 'bg:#222222 #cccccc', + Token.Scrollbar: 'bg:#00aaaa', + Token.Scrollbar.Button: 'bg:#003333', + Token.Toolbar.Off: 'bg:#222222 #696969', + Token.Toolbar.On: 'bg:#222222 #ffffff', + Token.Toolbar.Search: 'noinherit bold', + Token.Toolbar.Search.Text: 'nobold', + Token.Toolbar.System: 'noinherit bold', + Token.Toolbar.Arg: 'noinherit bold', + Token.Toolbar.Arg.Text: 'nobold', + Token.AutoSuggestion: '#666666', + Token.Aborted: '#888888', + } + # update with the prompt styles + styles.update({t: s for (t, s) in zip(tokens, cstyles)}) + # Update with with any user styles + userstyle = builtins.__xonsh_env__.get('PROMPT_TOOLKIT_STYLES') + if userstyle is not None: + styles.update(userstyle) + + return get_tokens, CustomStyle diff --git a/xonsh/ptk/__init__.py b/xonsh/ptk/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/xonsh/ptk/completer.py b/xonsh/ptk/completer.py deleted file mode 100644 index 84a1821..0000000 --- a/xonsh/ptk/completer.py +++ /dev/null @@ -1,108 +0,0 @@ -# -*- coding: utf-8 -*- -"""Completer implementation to use with prompt_toolkit.""" -import os -import builtins - -from prompt_toolkit.layout.dimension import LayoutDimension -from prompt_toolkit.completion import Completer, Completion -from prompt_toolkit.auto_suggest import AutoSuggestFromHistory - - -class PromptToolkitCompleter(Completer): - """Simple prompt_toolkit Completer object. - - It just redirects requests to normal Xonsh completer. - """ - - def __init__(self, completer, ctx, shell): - """Takes instance of xonsh.completer.Completer, the xonsh execution - context, and the shell instance itself. - """ - self.completer = completer - self.ctx = ctx - self.shell = shell - self.hist_suggester = AutoSuggestFromHistory() - - def get_completions(self, document, complete_event): - """Returns a generator for list of completions.""" - env = builtins.__xonsh__.env - should_complete = complete_event.completion_requested or env.get( - "UPDATE_COMPLETIONS_ON_KEYPRESS" - ) - # Only generate completions when the user hits tab. - if not should_complete or self.completer is None: - return - # generate actual completions - line = document.current_line.lstrip() - line_ex = builtins.aliases.expand_alias(line) - - endidx = document.cursor_position_col - begidx = line[:endidx].rfind(" ") + 1 if line[:endidx].rfind(" ") >= 0 else 0 - prefix = line[begidx:endidx] - expand_offset = len(line_ex) - len(line) - # get normal completions - completions, l = self.completer.complete( - prefix, line_ex, begidx + expand_offset, endidx + expand_offset, self.ctx - ) - # completions from auto suggest - sug_comp = None - if env.get("AUTO_SUGGEST") and env.get("AUTO_SUGGEST_IN_COMPLETIONS"): - sug_comp = self.suggestion_completion(document, line) - if sug_comp is None: - pass - elif len(completions) == 0: - completions = (sug_comp,) - else: - completions = set(completions) - completions.discard(sug_comp) - completions = (sug_comp,) + tuple(sorted(completions)) - # reserve space, if needed. 
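# In other words: space is reserved only when there is more than one candidate and their common prefix adds nothing beyond what was already typed, i.e. only when the completion menu will actually be drawn.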
- if len(completions) <= 1: - pass - elif len(os.path.commonprefix(completions)) <= len(prefix): - self.reserve_space() - # Find common prefix (strip quoting) - c_prefix = os.path.commonprefix([a.strip("'\"") for a in completions]) - # Find last split symbol, do not trim the last part - while c_prefix: - if c_prefix[-1] in r"/\.:@,": - break - c_prefix = c_prefix[:-1] - # yield completions - if sug_comp is None: - pre = min(document.cursor_position_col - begidx, len(c_prefix)) - else: - pre = len(c_prefix) - for comp in completions: - # do not display quote - disp = comp[pre:].strip("'\"") - yield Completion(comp, -l, display=disp) - - def suggestion_completion(self, document, line): - """Provides a completion based on the current auto-suggestion.""" - cli = self.shell.prompter.cli - sug = self.hist_suggester.get_suggestion(cli, cli.current_buffer, document) - if sug is None: - return None - comp, _, _ = sug.text.partition(" ") - _, _, prev = line.rpartition(" ") - return prev + comp - - def reserve_space(self): - cli = builtins.__xonsh__.shell.shell.prompter.cli - window = cli.application.layout.children[0].content.children[1] - - if window and window.render_info: - h = window.render_info.content_height - r = builtins.__xonsh__.env.get("COMPLETIONS_MENU_ROWS") - size = h + r - - def comp_height(cli): - # If there is an autocompletion menu to be shown, make sure that the - # layout has at least a minimal height in order to display it. - if not cli.is_done: - return LayoutDimension(min=size) - else: - return LayoutDimension() - - window._height = comp_height diff --git a/xonsh/ptk/history.py b/xonsh/ptk/history.py deleted file mode 100644 index 9a692a0..0000000 --- a/xonsh/ptk/history.py +++ /dev/null @@ -1,80 +0,0 @@ -# -*- coding: utf-8 -*- -"""History object for use with prompt_toolkit.""" -import builtins -from threading import Thread - -import prompt_toolkit.history - - -class PromptToolkitHistory(prompt_toolkit.history.History): - """History class that implements the prompt-toolkit history interface - with the xonsh backend. - """ - - def __init__(self, load_prev=True, wait_for_gc=True, *args, **kwargs): - """Initialize history object.""" - super().__init__() - self.strings = [] - if load_prev: - PromptToolkitHistoryAdder(self, wait_for_gc=wait_for_gc) - - def append(self, entry): - """Append new entry to the history.""" - self.strings.append(entry) - - def __getitem__(self, index): - return self.strings[index] - - def __len__(self): - return len(self.strings) - - def __iter__(self): - return iter(self.strings) - - -class PromptToolkitHistoryAdder(Thread): - def __init__(self, ptkhist, wait_for_gc=True, *args, **kwargs): - """Thread responsible for adding inputs from history to the current - prompt-toolkit history instance. May wait for the history garbage - collector to finish.
- """ - super(PromptToolkitHistoryAdder, self).__init__(*args, **kwargs) - self.daemon = True - self.ptkhist = ptkhist - self.wait_for_gc = wait_for_gc - self.start() - - def run(self): - hist = builtins.__xonsh__.history - if hist is None: - return - buf = None - ptkhist = self.ptkhist - for cmd in hist.all_items(): - line = cmd["inp"].rstrip() - if len(ptkhist) == 0 or line != ptkhist[-1]: - ptkhist.append(line) - if buf is None: - buf = self._buf() - if buf is None: - continue - buf.reset(initial_document=buf.document) - - def _buf(self): - # Thread-safe version of - # buf = builtins.__xonsh__.shell.shell.prompter.cli.application.buffer - path = [ - "__xonsh__", - "shell", - "shell", - "prompter", - "cli", - "application", - "buffer", - ] - buf = builtins - for a in path: - buf = getattr(buf, a, None) - if buf is None: - break - return buf diff --git a/xonsh/ptk/key_bindings.py b/xonsh/ptk/key_bindings.py deleted file mode 100644 index 5574129..0000000 --- a/xonsh/ptk/key_bindings.py +++ /dev/null @@ -1,366 +0,0 @@ -# -*- coding: utf-8 -*- -"""Key bindings for prompt_toolkit xonsh shell.""" -import builtins - -from prompt_toolkit.enums import DEFAULT_BUFFER -from prompt_toolkit.filters import ( - Condition, - IsMultiline, - HasSelection, - EmacsInsertMode, - ViInsertMode, -) -from prompt_toolkit.keys import Keys - -from xonsh.aliases import xonsh_exit -from xonsh.tools import check_for_partial_string, get_line_continuation -from xonsh.shell import transform_command - -env = builtins.__xonsh__.env -DEDENT_TOKENS = frozenset(["raise", "return", "pass", "break", "continue"]) - - -def carriage_return(b, cli, *, autoindent=True): - """Preliminary parser to determine if 'Enter' key should send command to the - xonsh parser for execution or should insert a newline for continued input. - - Current 'triggers' for inserting a newline are: - - Not on first line of buffer and line is non-empty - - Previous character is a colon (covers if, for, etc...) 
- - User is in an open paren-block - - Line ends with backslash - - Any text exists below cursor position (relevant when editing previous - multiline blocks) - """ - doc = b.document - at_end_of_line = _is_blank(doc.current_line_after_cursor) - current_line_blank = _is_blank(doc.current_line) - - indent = env.get("INDENT") if autoindent else "" - - partial_string_info = check_for_partial_string(doc.text) - in_partial_string = ( - partial_string_info[0] is not None and partial_string_info[1] is None - ) - - # indent after a colon - if doc.current_line_before_cursor.strip().endswith(":") and at_end_of_line: - b.newline(copy_margin=autoindent) - b.insert_text(indent, fire_event=False) - # if current line isn't blank, check dedent tokens - elif ( - not current_line_blank - and doc.current_line.split(maxsplit=1)[0] in DEDENT_TOKENS - and doc.line_count > 1 - ): - b.newline(copy_margin=autoindent) - b.delete_before_cursor(count=len(indent)) - elif not doc.on_first_line and not current_line_blank: - b.newline(copy_margin=autoindent) - elif doc.current_line.endswith(get_line_continuation()): - b.newline(copy_margin=autoindent) - elif doc.find_next_word_beginning() is not None and ( - any(not _is_blank(i) for i in doc.lines_from_current[1:]) - ): - b.newline(copy_margin=autoindent) - elif not current_line_blank and not can_compile(doc.text): - b.newline(copy_margin=autoindent) - elif current_line_blank and in_partial_string: - b.newline(copy_margin=autoindent) - else: - b.accept_action.validate_and_handle(cli, b) - - -def _is_blank(l): - return len(l.strip()) == 0 - - -def can_compile(src): - """Returns whether the code can be compiled, i.e. it is valid xonsh.""" - src = src if src.endswith("\n") else src + "\n" - src = transform_command(src, show_diff=False) - src = src.lstrip() - try: - builtins.__xonsh__.execer.compile( - src, mode="single", glbs=None, locs=builtins.__xonsh__.ctx - ) - rtn = True - except SyntaxError: - rtn = False - except Exception: - rtn = True - return rtn - - -@Condition -def tab_insert_indent(cli): - """Check if should insert indent instead of starting autocompletion. - Checks if there are only whitespaces before the cursor - if so indent - should be inserted, otherwise autocompletion. - - """ - before_cursor = cli.current_buffer.document.current_line_before_cursor - - return bool(before_cursor.isspace()) - - -@Condition -def beginning_of_line(cli): - """Check if cursor is at beginning of a line other than the first line in a - multiline document - """ - before_cursor = cli.current_buffer.document.current_line_before_cursor - - return bool( - len(before_cursor) == 0 and not cli.current_buffer.document.on_first_line - ) - - -@Condition -def end_of_line(cli): - """Check if cursor is at the end of a line other than the last line in a - multiline document - """ - d = cli.current_buffer.document - at_end = d.is_cursor_at_the_end_of_line - last_line = d.is_cursor_at_the_end - - return bool(at_end and not last_line) - - -@Condition -def should_confirm_completion(cli): - """Check if completion needs confirmation""" - return ( - builtins.__xonsh__.env.get("COMPLETIONS_CONFIRM") - and cli.current_buffer.complete_state - ) - - -# Copied from prompt-toolkit's key_binding/bindings/basic.py -@Condition -def ctrl_d_condition(cli): - """Ctrl-D binding is only active when the default buffer is selected and - empty. 
- """ - if builtins.__xonsh__.env.get("IGNOREEOF"): - raise EOFError - else: - return cli.current_buffer_name == DEFAULT_BUFFER and not cli.current_buffer.text - - -@Condition -def autopair_condition(cli): - """Check if XONSH_AUTOPAIR is set""" - return builtins.__xonsh__.env.get("XONSH_AUTOPAIR", False) - - -@Condition -def whitespace_or_bracket_before(cli): - """Check if there is whitespace or an opening - bracket to the left of the cursor""" - d = cli.current_buffer.document - return bool( - d.cursor_position == 0 - or d.char_before_cursor.isspace() - or d.char_before_cursor in "([{" - ) - - -@Condition -def whitespace_or_bracket_after(cli): - """Check if there is whitespace or a closing - bracket to the right of the cursor""" - d = cli.current_buffer.document - return bool( - d.is_cursor_at_the_end_of_line - or d.current_char.isspace() - or d.current_char in ")]}" - ) - - -def load_xonsh_bindings(key_bindings_manager): - """ - Load custom key bindings. - """ - handle = key_bindings_manager.registry.add_binding - has_selection = HasSelection() - insert_mode = ViInsertMode() | EmacsInsertMode() - - @handle(Keys.Tab, filter=tab_insert_indent) - def insert_indent(event): - """ - If there are only whitespaces before current cursor position insert - indent instead of autocompleting. - """ - event.cli.current_buffer.insert_text(env.get("INDENT")) - - @handle(Keys.ControlX, Keys.ControlE, filter=~has_selection) - def open_editor(event): - """ Open current buffer in editor """ - event.current_buffer.open_in_editor(event.cli) - - @handle(Keys.BackTab, filter=insert_mode) - def insert_literal_tab(event): - """ Insert literal tab on Shift+Tab instead of autocompleting """ - b = event.current_buffer - if b.complete_state: - b.complete_previous() - else: - event.cli.current_buffer.insert_text(env.get("INDENT")) - - @handle("(", filter=autopair_condition & whitespace_or_bracket_after) - def insert_right_parens(event): - event.cli.current_buffer.insert_text("(") - event.cli.current_buffer.insert_text(")", move_cursor=False) - - @handle(")", filter=autopair_condition) - def overwrite_right_parens(event): - buffer = event.cli.current_buffer - if buffer.document.current_char == ")": - buffer.cursor_position += 1 - else: - buffer.insert_text(")") - - @handle("[", filter=autopair_condition & whitespace_or_bracket_after) - def insert_right_bracket(event): - event.cli.current_buffer.insert_text("[") - event.cli.current_buffer.insert_text("]", move_cursor=False) - - @handle("]", filter=autopair_condition) - def overwrite_right_bracket(event): - buffer = event.cli.current_buffer - - if buffer.document.current_char == "]": - buffer.cursor_position += 1 - else: - buffer.insert_text("]") - - @handle("{", filter=autopair_condition & whitespace_or_bracket_after) - def insert_right_brace(event): - event.cli.current_buffer.insert_text("{") - event.cli.current_buffer.insert_text("}", move_cursor=False) - - @handle("}", filter=autopair_condition) - def overwrite_right_brace(event): - buffer = event.cli.current_buffer - - if buffer.document.current_char == "}": - buffer.cursor_position += 1 - else: - buffer.insert_text("}") - - @handle("'", filter=autopair_condition) - def insert_right_quote(event): - buffer = event.cli.current_buffer - - if buffer.document.current_char == "'": - buffer.cursor_position += 1 - elif whitespace_or_bracket_before(event.cli) and whitespace_or_bracket_after( - event.cli - ): - buffer.insert_text("'") - buffer.insert_text("'", move_cursor=False) - else: - buffer.insert_text("'") - - 
@handle('"', filter=autopair_condition) - def insert_right_double_quote(event): - buffer = event.cli.current_buffer - - if buffer.document.current_char == '"': - buffer.cursor_position += 1 - elif whitespace_or_bracket_before(event.cli) and whitespace_or_bracket_after( - event.cli - ): - buffer.insert_text('"') - buffer.insert_text('"', move_cursor=False) - else: - buffer.insert_text('"') - - @handle(Keys.Backspace, filter=autopair_condition) - def delete_brackets_or_quotes(event): - """Delete empty pair of brackets or quotes""" - buffer = event.cli.current_buffer - before = buffer.document.char_before_cursor - after = buffer.document.current_char - - if any( - [before == b and after == a for (b, a) in ["()", "[]", "{}", "''", '""']] - ): - buffer.delete(1) - - buffer.delete_before_cursor(1) - - @handle(Keys.ControlD, filter=ctrl_d_condition) - def call_exit_alias(event): - """Use xonsh exit function""" - b = event.cli.current_buffer - b.accept_action.validate_and_handle(event.cli, b) - xonsh_exit([]) - - @handle(Keys.ControlJ, filter=IsMultiline()) - def multiline_carriage_return(event): - """ Wrapper around carriage_return multiline parser """ - b = event.cli.current_buffer - carriage_return(b, event.cli) - - @handle(Keys.ControlJ, filter=should_confirm_completion) - def enter_confirm_completion(event): - """Ignore (confirm completion)""" - event.current_buffer.complete_state = None - - @handle(Keys.Escape, filter=should_confirm_completion) - def esc_cancel_completion(event): - """Use to cancel completion""" - event.cli.current_buffer.cancel_completion() - - @handle(Keys.Escape, Keys.ControlJ) - def execute_block_now(event): - """Execute a block of text irrespective of cursor position""" - b = event.cli.current_buffer - b.accept_action.validate_and_handle(event.cli, b) - - @handle(Keys.Left, filter=beginning_of_line) - def wrap_cursor_back(event): - """Move cursor to end of previous line unless at beginning of - document - """ - b = event.cli.current_buffer - b.cursor_up(count=1) - relative_end_index = b.document.get_end_of_line_position() - b.cursor_right(count=relative_end_index) - - @handle(Keys.Right, filter=end_of_line) - def wrap_cursor_forward(event): - """Move cursor to beginning of next line unless at end of document""" - b = event.cli.current_buffer - relative_begin_index = b.document.get_start_of_line_position() - b.cursor_left(count=abs(relative_begin_index)) - b.cursor_down(count=1) - - @handle(Keys.ControlI, filter=insert_mode) - def generate_completions(event): - """ - Tab-completion: where the first tab completes the common suffix and the - second tab lists all the completions. - - Notes - ----- - This method was forked from the mainline prompt-toolkit repo. - Copyright (c) 2014, Jonathan Slenders, All rights reserved. - """ - b = event.current_buffer - - def second_tab(): - if b.complete_state: - b.complete_next() - else: - event.cli.start_completion(select_first=False) - - # On the second tab-press, or when already navigating through - # completions. 
- if event.is_repeat or b.complete_state: - second_tab() - else: - event.cli.start_completion(insert_common_part=True, select_first=False) diff --git a/xonsh/ptk/shell.py b/xonsh/ptk/shell.py deleted file mode 100644 index ccb95dc..0000000 --- a/xonsh/ptk/shell.py +++ /dev/null @@ -1,337 +0,0 @@ -# -*- coding: utf-8 -*- -"""The prompt_toolkit based xonsh shell.""" -import sys -import builtins - -from prompt_toolkit.key_binding.manager import KeyBindingManager -from prompt_toolkit.auto_suggest import AutoSuggestFromHistory -from prompt_toolkit.layout.lexers import PygmentsLexer -from prompt_toolkit.shortcuts import print_tokens -from prompt_toolkit.styles import PygmentsStyle, style_from_dict - -from xonsh.base_shell import BaseShell -from xonsh.tools import print_exception, carriage_return, ansicolors_to_ptk1_names -from xonsh.ptk.completer import PromptToolkitCompleter -from xonsh.ptk.history import PromptToolkitHistory -from xonsh.ptk.key_bindings import load_xonsh_bindings -from xonsh.ptk.shortcuts import Prompter -from xonsh.events import events -from xonsh.shell import transform_command -from xonsh.platform import HAS_PYGMENTS, ON_WINDOWS -from xonsh.style_tools import ( - partial_color_tokenize, - _TokenType, - DEFAULT_STYLE_DICT as _DEFAULT_STYLE_DICT, -) -from xonsh.lazyimps import pygments, pyghooks, winutils -from xonsh.pygments_cache import get_all_styles -from xonsh.lazyasd import LazyObject - - -Token = _TokenType() - -events.transmogrify("on_ptk_create", "LoadEvent") -events.doc( - "on_ptk_create", - """ -on_ptk_create(prompter: Prompter, history: PromptToolkitHistory, completer: PromptToolkitCompleter, bindings: KeyBindingManager) -> - -Fired after prompt toolkit has been initialized -""", -) - -# Convert new ansicolor names to names -# understood by PTK1 -DEFAULT_STYLE_DICT = LazyObject( - lambda: ansicolors_to_ptk1_names(_DEFAULT_STYLE_DICT), - globals(), - "DEFAULT_STYLE_DICT", -) - - -class PromptToolkitShell(BaseShell): - """The xonsh shell.""" - - def __init__(self, **kwargs): - super().__init__(**kwargs) - if ON_WINDOWS: - winutils.enable_virtual_terminal_processing() - self._first_prompt = True - self.prompter = Prompter() - self.history = PromptToolkitHistory() - self.pt_completer = PromptToolkitCompleter(self.completer, self.ctx, self) - key_bindings_manager_args = { - "enable_auto_suggest_bindings": True, - "enable_search": True, - "enable_abort_and_exit_bindings": True, - } - self.key_bindings_manager = KeyBindingManager(**key_bindings_manager_args) - load_xonsh_bindings(self.key_bindings_manager) - # This assumes that PromptToolkitShell is a singleton - events.on_ptk_create.fire( - prompter=self.prompter, - history=self.history, - completer=self.pt_completer, - bindings=self.key_bindings_manager, - ) - - def singleline( - self, - store_in_history=True, - auto_suggest=None, - enable_history_search=True, - multiline=True, - **kwargs - ): - """Reads a single line of input from the shell. The store_in_history - kwarg flags whether the input should be stored in PTK's in-memory - history. 
- """ - events.on_pre_prompt.fire() - env = builtins.__xonsh__.env - mouse_support = env.get("MOUSE_SUPPORT") - if store_in_history: - history = self.history - else: - history = None - enable_history_search = False - auto_suggest = auto_suggest if env.get("AUTO_SUGGEST") else None - completions_display = env.get("COMPLETIONS_DISPLAY") - multicolumn = completions_display == "multi" - complete_while_typing = env.get("UPDATE_COMPLETIONS_ON_KEYPRESS") - if complete_while_typing: - # PTK requires history search to be none when completing while typing - enable_history_search = False - if HAS_PYGMENTS: - self.styler.style_name = env.get("XONSH_COLOR_STYLE") - completer = None if completions_display == "none" else self.pt_completer - if not env.get("UPDATE_PROMPT_ON_KEYPRESS"): - prompt_tokens_cached = self.prompt_tokens(None) - get_prompt_tokens = lambda cli: prompt_tokens_cached - rprompt_tokens_cached = self.rprompt_tokens(None) - get_rprompt_tokens = lambda cli: rprompt_tokens_cached - bottom_toolbar_tokens_cached = self.bottom_toolbar_tokens(None) - get_bottom_toolbar_tokens = lambda cli: bottom_toolbar_tokens_cached - else: - get_prompt_tokens = self.prompt_tokens - get_rprompt_tokens = self.rprompt_tokens - get_bottom_toolbar_tokens = self.bottom_toolbar_tokens - - with self.prompter: - prompt_args = { - "mouse_support": mouse_support, - "auto_suggest": auto_suggest, - "get_prompt_tokens": get_prompt_tokens, - "get_rprompt_tokens": get_rprompt_tokens, - "get_bottom_toolbar_tokens": get_bottom_toolbar_tokens, - "completer": completer, - "multiline": multiline, - "get_continuation_tokens": self.continuation_tokens, - "history": history, - "enable_history_search": enable_history_search, - "reserve_space_for_menu": 0, - "key_bindings_registry": self.key_bindings_manager.registry, - "display_completions_in_columns": multicolumn, - "complete_while_typing": complete_while_typing, - } - if builtins.__xonsh__.env.get("COLOR_INPUT"): - if HAS_PYGMENTS: - prompt_args["lexer"] = PygmentsLexer(pyghooks.XonshLexer) - prompt_args["style"] = PygmentsStyle( - pyghooks.xonsh_style_proxy(self.styler) - ) - else: - prompt_args["style"] = style_from_dict(DEFAULT_STYLE_DICT) - line = self.prompter.prompt(**prompt_args) - events.on_post_prompt.fire() - return line - - def _push(self, line): - """Pushes a line onto the buffer and compiles the code in a way that - enables multiline input. 
- """ - code = None - self.buffer.append(line) - if self.need_more_lines: - return None, code - src = "".join(self.buffer) - src = transform_command(src) - try: - code = self.execer.compile(src, mode="single", glbs=self.ctx, locs=None) - self.reset_buffer() - except Exception: # pylint: disable=broad-except - self.reset_buffer() - print_exception() - return src, None - return src, code - - def cmdloop(self, intro=None): - """Enters a loop that reads and execute input from user.""" - if intro: - print(intro) - auto_suggest = AutoSuggestFromHistory() - self.push = self._push - while not builtins.__xonsh__.exit: - try: - line = self.singleline(auto_suggest=auto_suggest) - if not line: - self.emptyline() - else: - line = self.precmd(line) - self.default(line) - except (KeyboardInterrupt, SystemExit): - self.reset_buffer() - except EOFError: - if builtins.__xonsh__.env.get("IGNOREEOF"): - print('Use "exit" to leave the shell.', file=sys.stderr) - else: - break - - def prompt_tokens(self, cli): - """Returns a list of (token, str) tuples for the current prompt.""" - p = builtins.__xonsh__.env.get("PROMPT") - try: - p = self.prompt_formatter(p) - except Exception: # pylint: disable=broad-except - print_exception() - toks = partial_color_tokenize(p) - if self._first_prompt: - carriage_return() - self._first_prompt = False - self.settitle() - return toks - - def rprompt_tokens(self, cli): - """Returns a list of (token, str) tuples for the current right - prompt. - """ - p = builtins.__xonsh__.env.get("RIGHT_PROMPT") - # self.prompt_formatter does handle empty strings properly, - # but this avoids descending into it in the common case of - # $RIGHT_PROMPT == ''. - if isinstance(p, str) and len(p) == 0: - return [] - try: - p = self.prompt_formatter(p) - except Exception: # pylint: disable=broad-except - print_exception() - toks = partial_color_tokenize(p) - return toks - - def bottom_toolbar_tokens(self, cli): - """Returns a list of (token, str) tuples for the current bottom - toolbar. - """ - p = builtins.__xonsh__.env.get("BOTTOM_TOOLBAR") - # self.prompt_formatter does handle empty strings properly, - # but this avoids descending into it in the common case of - # $TOOLBAR == ''. - if isinstance(p, str) and len(p) == 0: - return [] - try: - p = self.prompt_formatter(p) - except Exception: # pylint: disable=broad-except - print_exception() - toks = partial_color_tokenize(p) - return toks - - def continuation_tokens(self, cli, width): - """Displays dots in multiline prompt""" - width = width - 1 - dots = builtins.__xonsh__.env.get("MULTILINE_PROMPT") - dots = dots() if callable(dots) else dots - if dots is None: - return [(Token, " " * (width + 1))] - basetoks = self.format_color(dots) - baselen = sum(len(t[1]) for t in basetoks) - if baselen == 0: - return [(Token, " " * (width + 1))] - toks = basetoks * (width // baselen) - n = width % baselen - count = 0 - for tok in basetoks: - slen = len(tok[1]) - newcount = slen + count - if slen == 0: - continue - elif newcount <= n: - toks.append(tok) - else: - toks.append((tok[0], tok[1][: n - count])) - count = newcount - if n <= count: - break - toks.append((Token, " ")) # final space - return toks - - def format_color(self, string, hide=False, force_string=False, **kwargs): - """Formats a color string using Pygments. This, therefore, returns - a list of (Token, str) tuples. If force_string is set to true, though, - this will return a color formatted string. 
- """ - tokens = partial_color_tokenize(string) - if force_string and HAS_PYGMENTS: - env = builtins.__xonsh__.env - self.styler.style_name = env.get("XONSH_COLOR_STYLE") - proxy_style = pyghooks.xonsh_style_proxy(self.styler) - formatter = pyghooks.XonshTerminal256Formatter(style=proxy_style) - s = pygments.format(tokens, formatter) - return s - elif force_string: - print("To force colorization of string, install Pygments") - return tokens - else: - return tokens - - def print_color(self, string, end="\n", **kwargs): - """Prints a color string using prompt-toolkit color management.""" - if isinstance(string, str): - tokens = partial_color_tokenize(string + end) - else: - # assume this is a list of (Token, str) tuples and just print - tokens = string - if HAS_PYGMENTS: - env = builtins.__xonsh__.env - self.styler.style_name = env.get("XONSH_COLOR_STYLE") - proxy_style = PygmentsStyle(pyghooks.xonsh_style_proxy(self.styler)) - else: - proxy_style = style_from_dict(DEFAULT_STYLE_DICT) - print_tokens(tokens, style=proxy_style) - - def color_style_names(self): - """Returns an iterable of all available style names.""" - if not HAS_PYGMENTS: - return ["For other xonsh styles, please install pygments"] - return get_all_styles() - - def color_style(self): - """Returns the current color map.""" - if not HAS_PYGMENTS: - return DEFAULT_STYLE_DICT - env = builtins.__xonsh__.env - self.styler.style_name = env.get("XONSH_COLOR_STYLE") - return self.styler.styles - - def restore_tty_sanity(self): - """An interface for resetting the TTY stdin mode. This is highly - dependent on the shell backend. Also it is mostly optional since - it only affects ^Z backgrounding behaviour. - """ - # PTK does not seem to need any specialization here. However, - # if it does for some reason in the future... - # The following writes an ANSI escape sequence that sends the cursor - # to the end of the line. This has the effect of restoring ECHO mode. - # See http://unix.stackexchange.com/a/108014/129048 for more details. - # This line can also be replaced by os.system("stty sane"), as per - # http://stackoverflow.com/questions/19777129/interactive-python-interpreter-run-in-background#comment29421919_19778355 - # However, it is important to note that not termios-based solution - # seems to work. My guess is that this is because termios restoration - # needs to be performed by the subprocess itself. This fix is important - # when subprocesses don't properly restore the terminal attributes, - # like Python in interactive mode. Also note that the sequences "\033M" - # and "\033E" seem to work too, but these are technically VT100 codes. - # I used the more primitive ANSI sequence to maximize compatibility. 
- # -scopatz 2017-01-28 - # if not ON_POSIX: - # return - # sys.stdout.write('\033[9999999C\n') diff --git a/xonsh/ptk/shortcuts.py b/xonsh/ptk/shortcuts.py deleted file mode 100644 index c5e797d..0000000 --- a/xonsh/ptk/shortcuts.py +++ /dev/null @@ -1,131 +0,0 @@ -"""A prompt-toolkit inspired shortcut collection.""" -import builtins -import textwrap - -from prompt_toolkit.interface import CommandLineInterface -from prompt_toolkit.enums import EditingMode -from prompt_toolkit.utils import DummyContext -from prompt_toolkit.shortcuts import ( - create_prompt_application, - create_eventloop, - create_asyncio_eventloop, - create_output, -) - -from xonsh.platform import ptk_version_info -import xonsh.tools as xt - - -class Prompter(object): - def __init__(self, cli=None, *args, **kwargs): - """Implements a prompt that statefully holds a command-line - interface. When used as a context manager, it will return itself - on entry and reset itself on exit. - - Parameters - ---------- - cli : CommandLineInterface or None, optional - If this is not a CommandLineInterface object, such an object - will be created when the prompt() method is called. - """ - self.cli = cli - self.major_minor = ptk_version_info()[:2] - - def __enter__(self): - self.reset() - return self - - def __exit__(self, exc_type, exc_value, traceback): - pass - - def prompt(self, message="", **kwargs): - """Get input from the user and return it. - - This is a wrapper around a lot of prompt_toolkit functionality and - can be a replacement for raw_input (or GNU readline). If you want - to keep your history across several calls, create one - `~prompt_toolkit.history.History` instance and pass it every - time. This function accepts many keyword arguments. Except for the - following, they are a proxy to the arguments of - create_prompt_application(). - - Parameters - ---------- - patch_stdout : file-like, optional - Replace ``sys.stdout`` by a proxy that ensures that print - statements from other threads won't destroy the prompt. (They - will be printed above the prompt instead.) - return_asyncio_coroutine : bool, optional - When True, return an asyncio coroutine. (Python >3.3) - - Notes - ----- - This method was forked from the mainline prompt-toolkit repo. - Copyright (c) 2014, Jonathan Slenders, All rights reserved. - """ - patch_stdout = kwargs.pop("patch_stdout", False) - return_asyncio_coroutine = kwargs.pop("return_asyncio_coroutine", False) - if return_asyncio_coroutine: - eventloop = create_asyncio_eventloop() - else: - eventloop = kwargs.pop("eventloop", None) or create_eventloop() - - # Create CommandLineInterface. - if self.cli is None: - if builtins.__xonsh__.env.get("VI_MODE"): - editing_mode = EditingMode.VI - else: - editing_mode = EditingMode.EMACS - kwargs["editing_mode"] = editing_mode - cli = CommandLineInterface( - application=create_prompt_application(message, **kwargs), - eventloop=eventloop, - output=create_output(), - ) - self.cli = cli - else: - cli = self.cli - - # Replace stdout. - patch_context = cli.patch_stdout_context() if patch_stdout else DummyContext() - - # Read input and return it. - if return_asyncio_coroutine: - # Create an asyncio coroutine and call it.
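# The coroutine source below is compiled at run time; patch_context and cli are made visible to it through exec_context, and the resulting prompt_coro() is handed back for the caller's event loop to schedule.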
- exec_context = {"patch_context": patch_context, "cli": cli} - exec( - textwrap.dedent( - """ - import asyncio - @asyncio.coroutine - def prompt_coro(): - with patch_context: - document = yield from cli.run_async(reset_current_buffer=False) - if document: - return document.text - """ - ), - exec_context, - ) - return exec_context["prompt_coro"]() - else: - # Note: We pass `reset_current_buffer=False`, because that way - # it's easy to give DEFAULT_BUFFER a default value, without it - # getting erased. We don't have to reset anyway, because this is - # the first and only time that this CommandLineInterface will run. - try: - with patch_context: - document = cli.run(reset_current_buffer=False) - if document: - return document.text - except Exception: - xt.print_exception() - # return something to prevent xonsh crash when any - # exceptions raise - return "" - finally: - eventloop.close() - - def reset(self): - """Resets the prompt and cli to a pristine state on this object.""" - self.cli = None diff --git a/xonsh/ptk2/__init__.py b/xonsh/ptk2/__init__.py deleted file mode 100644 index 7817046..0000000 --- a/xonsh/ptk2/__init__.py +++ /dev/null @@ -1,8 +0,0 @@ -# must come before ptk / pygments imports -from xonsh.lazyasd import load_module_in_background - -load_module_in_background( - "pkg_resources", - debug="XONSH_DEBUG", - replacements={"pygments.plugin": "pkg_resources"}, -) diff --git a/xonsh/ptk2/history.py b/xonsh/ptk2/history.py deleted file mode 100644 index 52ea137..0000000 --- a/xonsh/ptk2/history.py +++ /dev/null @@ -1,53 +0,0 @@ -# -*- coding: utf-8 -*- -"""History object for use with prompt_toolkit.""" -import builtins - -import prompt_toolkit.history - - -class PromptToolkitHistory(prompt_toolkit.history.History): - """History class that implements the prompt-toolkit history interface - with the xonsh backend. - """ - - def __init__(self, load_prev=True, *args, **kwargs): - """Initialize history object.""" - super().__init__() - self.load_prev = load_prev - - def store_string(self, entry): - pass - - def load_history_strings(self): - """Loads synchronous history strings""" - if not self.load_prev: - return - hist = builtins.__xonsh__.history - if hist is None: - return - for cmd in hist.all_items(newest_first=True): - line = cmd["inp"].rstrip() - strs = self.get_strings() - if len(strs) == 0 or line != strs[-1]: - yield line - - def __getitem__(self, index): - return self.get_strings()[index] - - def __len__(self): - return len(self.get_strings()) - - def __iter__(self): - return iter(self.get_strings()) - - -def _cust_history_matches(self, i): - """Custom history search method for prompt_toolkit that matches previous - commands anywhere on a line, not just at the start. 
- - This gets monkeypatched into the prompt_toolkit prompter if - ``XONSH_HISTORY_MATCH_ANYWHERE=True``""" - return ( - self.history_search_text is None - or self.history_search_text in self._working_lines[i] - ) diff --git a/xonsh/ptk2/key_bindings.py b/xonsh/ptk2/key_bindings.py deleted file mode 100644 index cf7bd38..0000000 --- a/xonsh/ptk2/key_bindings.py +++ /dev/null @@ -1,352 +0,0 @@ -# -*- coding: utf-8 -*- -"""Key bindings for prompt_toolkit xonsh shell.""" -import builtins - -from prompt_toolkit import search -from prompt_toolkit.enums import DEFAULT_BUFFER -from prompt_toolkit.filters import ( - Condition, - IsMultiline, - HasSelection, - EmacsInsertMode, - ViInsertMode, - IsSearching, -) -from prompt_toolkit.keys import Keys -from prompt_toolkit.application.current import get_app - -from xonsh.aliases import xonsh_exit -from xonsh.tools import check_for_partial_string, get_line_continuation -from xonsh.shell import transform_command - -DEDENT_TOKENS = frozenset(["raise", "return", "pass", "break", "continue"]) - - -def carriage_return(b, cli, *, autoindent=True): - """Preliminary parser to determine if 'Enter' key should send command to the - xonsh parser for execution or should insert a newline for continued input. - - Current 'triggers' for inserting a newline are: - - Not on first line of buffer and line is non-empty - - Previous character is a colon (covers if, for, etc...) - - User is in an open paren-block - - Line ends with backslash - - Any text exists below cursor position (relevant when editing previous - multiline blocks) - """ - doc = b.document - at_end_of_line = _is_blank(doc.current_line_after_cursor) - current_line_blank = _is_blank(doc.current_line) - - env = builtins.__xonsh__.env - indent = env.get("INDENT") if autoindent else "" - - partial_string_info = check_for_partial_string(doc.text) - in_partial_string = ( - partial_string_info[0] is not None and partial_string_info[1] is None - ) - - # indent after a colon - if doc.current_line_before_cursor.strip().endswith(":") and at_end_of_line: - b.newline(copy_margin=autoindent) - b.insert_text(indent, fire_event=False) - # if current line isn't blank, check dedent tokens - elif ( - not current_line_blank - and doc.current_line.split(maxsplit=1)[0] in DEDENT_TOKENS - and doc.line_count > 1 - ): - b.newline(copy_margin=autoindent) - b.delete_before_cursor(count=len(indent)) - elif not doc.on_first_line and not current_line_blank: - b.newline(copy_margin=autoindent) - elif doc.current_line.endswith(get_line_continuation()): - b.newline(copy_margin=autoindent) - elif doc.find_next_word_beginning() is not None and ( - any(not _is_blank(i) for i in doc.lines_from_current[1:]) - ): - b.newline(copy_margin=autoindent) - elif not current_line_blank and not can_compile(doc.text): - b.newline(copy_margin=autoindent) - elif current_line_blank and in_partial_string: - b.newline(copy_margin=autoindent) - else: - b.validate_and_handle() - - -def _is_blank(l): - return len(l.strip()) == 0 - - -def can_compile(src): - """Returns whether the code can be compiled, i.e. it is valid xonsh.""" - src = src if src.endswith("\n") else src + "\n" - src = transform_command(src, show_diff=False) - src = src.lstrip() - try: - builtins.__xonsh__.execer.compile( - src, mode="single", glbs=None, locs=builtins.__xonsh__.ctx - ) - rtn = True - except SyntaxError: - rtn = False - except Exception: - rtn = True - return rtn - - -@Condition -def tab_insert_indent(): - """Check if should insert indent instead of starting autocompletion. 
- Checks if there are only whitespaces before the cursor - if so indent - should be inserted, otherwise autocompletion. - - """ - before_cursor = get_app().current_buffer.document.current_line_before_cursor - - return bool(before_cursor.isspace()) - - -@Condition -def beginning_of_line(): - """Check if cursor is at beginning of a line other than the first line in a - multiline document - """ - app = get_app() - before_cursor = app.current_buffer.document.current_line_before_cursor - - return bool( - len(before_cursor) == 0 and not app.current_buffer.document.on_first_line - ) - - -@Condition -def end_of_line(): - """Check if cursor is at the end of a line other than the last line in a - multiline document - """ - d = get_app().current_buffer.document - at_end = d.is_cursor_at_the_end_of_line - last_line = d.is_cursor_at_the_end - - return bool(at_end and not last_line) - - -@Condition -def should_confirm_completion(): - """Check if completion needs confirmation""" - return ( - builtins.__xonsh__.env.get("COMPLETIONS_CONFIRM") - and get_app().current_buffer.complete_state - ) - - -# Copied from prompt-toolkit's key_binding/bindings/basic.py -@Condition -def ctrl_d_condition(): - """Ctrl-D binding is only active when the default buffer is selected and - empty. - """ - if builtins.__xonsh__.env.get("IGNOREEOF"): - return False - else: - app = get_app() - buffer_name = app.current_buffer.name - - return buffer_name == DEFAULT_BUFFER and not app.current_buffer.text - - -@Condition -def autopair_condition(): - """Check if XONSH_AUTOPAIR is set""" - return builtins.__xonsh__.env.get("XONSH_AUTOPAIR", False) - - -@Condition -def whitespace_or_bracket_before(): - """Check if there is whitespace or an opening - bracket to the left of the cursor""" - d = get_app().current_buffer.document - return bool( - d.cursor_position == 0 - or d.char_before_cursor.isspace() - or d.char_before_cursor in "([{" - ) - - -@Condition -def whitespace_or_bracket_after(): - """Check if there is whitespace or a closing - bracket to the right of the cursor""" - d = get_app().current_buffer.document - return bool( - d.is_cursor_at_the_end_of_line - or d.current_char.isspace() - or d.current_char in ")]}" - ) - - -def load_xonsh_bindings(key_bindings): - """ - Load custom key bindings. - """ - handle = key_bindings.add - has_selection = HasSelection() - insert_mode = ViInsertMode() | EmacsInsertMode() - - @handle(Keys.Tab, filter=tab_insert_indent) - def insert_indent(event): - """ - If there are only whitespaces before current cursor position insert - indent instead of autocompleting. 
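- - For example, pressing Tab at the start of a blank continuation line - inserts $INDENT (typically four spaces) rather than opening the - completion menu.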
- """ - env = builtins.__xonsh__.env - event.cli.current_buffer.insert_text(env.get("INDENT")) - - @handle(Keys.ControlX, Keys.ControlE, filter=~has_selection) - def open_editor(event): - """ Open current buffer in editor """ - event.current_buffer.open_in_editor(event.cli) - - @handle(Keys.BackTab, filter=insert_mode) - def insert_literal_tab(event): - """ Insert literal tab on Shift+Tab instead of autocompleting """ - b = event.current_buffer - if b.complete_state: - b.complete_previous() - else: - env = builtins.__xonsh__.env - event.cli.current_buffer.insert_text(env.get("INDENT")) - - @handle("(", filter=autopair_condition & whitespace_or_bracket_after) - def insert_right_parens(event): - event.cli.current_buffer.insert_text("(") - event.cli.current_buffer.insert_text(")", move_cursor=False) - - @handle(")", filter=autopair_condition) - def overwrite_right_parens(event): - buffer = event.cli.current_buffer - if buffer.document.current_char == ")": - buffer.cursor_position += 1 - else: - buffer.insert_text(")") - - @handle("[", filter=autopair_condition & whitespace_or_bracket_after) - def insert_right_bracket(event): - event.cli.current_buffer.insert_text("[") - event.cli.current_buffer.insert_text("]", move_cursor=False) - - @handle("]", filter=autopair_condition) - def overwrite_right_bracket(event): - buffer = event.cli.current_buffer - - if buffer.document.current_char == "]": - buffer.cursor_position += 1 - else: - buffer.insert_text("]") - - @handle("{", filter=autopair_condition & whitespace_or_bracket_after) - def insert_right_brace(event): - event.cli.current_buffer.insert_text("{") - event.cli.current_buffer.insert_text("}", move_cursor=False) - - @handle("}", filter=autopair_condition) - def overwrite_right_brace(event): - buffer = event.cli.current_buffer - - if buffer.document.current_char == "}": - buffer.cursor_position += 1 - else: - buffer.insert_text("}") - - @handle("'", filter=autopair_condition) - def insert_right_quote(event): - buffer = event.cli.current_buffer - - if buffer.document.current_char == "'": - buffer.cursor_position += 1 - elif whitespace_or_bracket_before() and whitespace_or_bracket_after(): - buffer.insert_text("'") - buffer.insert_text("'", move_cursor=False) - else: - buffer.insert_text("'") - - @handle('"', filter=autopair_condition) - def insert_right_double_quote(event): - buffer = event.cli.current_buffer - - if buffer.document.current_char == '"': - buffer.cursor_position += 1 - elif whitespace_or_bracket_before() and whitespace_or_bracket_after(): - buffer.insert_text('"') - buffer.insert_text('"', move_cursor=False) - else: - buffer.insert_text('"') - - @handle(Keys.Backspace, filter=autopair_condition) - def delete_brackets_or_quotes(event): - """Delete empty pair of brackets or quotes""" - buffer = event.cli.current_buffer - before = buffer.document.char_before_cursor - after = buffer.document.current_char - - if any( - [before == b and after == a for (b, a) in ["()", "[]", "{}", "''", '""']] - ): - buffer.delete(1) - - buffer.delete_before_cursor(1) - - @handle(Keys.ControlD, filter=ctrl_d_condition) - def call_exit_alias(event): - """Use xonsh exit function""" - b = event.cli.current_buffer - b.validate_and_handle() - xonsh_exit([]) - - @handle(Keys.ControlJ, filter=IsMultiline()) - @handle(Keys.ControlM, filter=IsMultiline()) - def multiline_carriage_return(event): - """ Wrapper around carriage_return multiline parser """ - b = event.cli.current_buffer - carriage_return(b, event.cli) - - @handle(Keys.ControlJ, 
filter=should_confirm_completion) - @handle(Keys.ControlM, filter=should_confirm_completion) - def enter_confirm_completion(event): - """Ignore (confirm completion)""" - event.current_buffer.complete_state = None - - @handle(Keys.Escape, filter=should_confirm_completion) - def esc_cancel_completion(event): - """Use to cancel completion""" - event.cli.current_buffer.cancel_completion() - - @handle(Keys.Escape, Keys.ControlJ) - def execute_block_now(event): - """Execute a block of text irrespective of cursor position""" - b = event.cli.current_buffer - b.validate_and_handle() - - @handle(Keys.Left, filter=beginning_of_line) - def wrap_cursor_back(event): - """Move cursor to end of previous line unless at beginning of - document - """ - b = event.cli.current_buffer - b.cursor_up(count=1) - relative_end_index = b.document.get_end_of_line_position() - b.cursor_right(count=relative_end_index) - - @handle(Keys.Right, filter=end_of_line) - def wrap_cursor_forward(event): - """Move cursor to beginning of next line unless at end of document""" - b = event.cli.current_buffer - relative_begin_index = b.document.get_start_of_line_position() - b.cursor_left(count=abs(relative_begin_index)) - b.cursor_down(count=1) - - @handle(Keys.ControlM, filter=IsSearching()) - @handle(Keys.ControlJ, filter=IsSearching()) - def accept_search(event): - search.accept_search() diff --git a/xonsh/ptk2/shell.py b/xonsh/ptk2/shell.py deleted file mode 100644 index 640d58a..0000000 --- a/xonsh/ptk2/shell.py +++ /dev/null @@ -1,350 +0,0 @@ -# -*- coding: utf-8 -*- -"""The prompt_toolkit based xonsh shell.""" -import sys -import builtins -from types import MethodType - -from xonsh.events import events -from xonsh.base_shell import BaseShell -from xonsh.shell import transform_command -from xonsh.tools import print_exception, carriage_return -from xonsh.platform import HAS_PYGMENTS, ON_WINDOWS -from xonsh.style_tools import partial_color_tokenize, _TokenType, DEFAULT_STYLE_DICT -from xonsh.lazyimps import pygments, pyghooks, winutils -from xonsh.pygments_cache import get_all_styles -from xonsh.ptk2.history import PromptToolkitHistory, _cust_history_matches -from xonsh.ptk2.completer import PromptToolkitCompleter -from xonsh.ptk2.key_bindings import load_xonsh_bindings - -from prompt_toolkit.auto_suggest import AutoSuggestFromHistory -from prompt_toolkit.lexers import PygmentsLexer -from prompt_toolkit.enums import EditingMode -from prompt_toolkit.key_binding import KeyBindings -from prompt_toolkit.history import ThreadedHistory -from prompt_toolkit.shortcuts import print_formatted_text as ptk_print -from prompt_toolkit.shortcuts import CompleteStyle -from prompt_toolkit.shortcuts.prompt import PromptSession -from prompt_toolkit.formatted_text import PygmentsTokens -from prompt_toolkit.styles import merge_styles, Style -from prompt_toolkit.styles.pygments import ( - style_from_pygments_cls, - style_from_pygments_dict, -) - - -Token = _TokenType() - -events.transmogrify("on_ptk_create", "LoadEvent") -events.doc( - "on_ptk_create", - """ -on_ptk_create(prompter: PromptSession, history: PromptToolkitHistory, completer: PromptToolkitCompleter, bindings: KeyBindings) -> - -Fired after prompt toolkit has been initialized -""", -) - - -class PromptToolkit2Shell(BaseShell): - """The xonsh shell for prompt_toolkit v2.""" - - completion_displays_to_styles = { - "multi": CompleteStyle.MULTI_COLUMN, - "single": CompleteStyle.COLUMN, - "readline": CompleteStyle.READLINE_LIKE, - "none": None, - } - - def __init__(self, **kwargs): - 
super().__init__(**kwargs) - if ON_WINDOWS: - winutils.enable_virtual_terminal_processing() - self._first_prompt = True - self.history = ThreadedHistory(PromptToolkitHistory()) - self.prompter = PromptSession(history=self.history) - self.pt_completer = PromptToolkitCompleter(self.completer, self.ctx, self) - self.key_bindings = KeyBindings() - load_xonsh_bindings(self.key_bindings) - # Store original `_history_matches` in case we need to restore it - self._history_matches_orig = self.prompter.default_buffer._history_matches - # This assumes that PromptToolkit2Shell is a singleton - events.on_ptk_create.fire( - prompter=self.prompter, - history=self.history, - completer=self.pt_completer, - bindings=self.key_bindings, - ) - - def singleline( - self, auto_suggest=None, enable_history_search=True, multiline=True, **kwargs - ): - """Reads a single line of input from the shell. The store_in_history - kwarg flags whether the input should be stored in PTK's in-memory - history. - """ - events.on_pre_prompt.fire() - env = builtins.__xonsh__.env - mouse_support = env.get("MOUSE_SUPPORT") - auto_suggest = auto_suggest if env.get("AUTO_SUGGEST") else None - completions_display = env.get("COMPLETIONS_DISPLAY") - complete_style = self.completion_displays_to_styles[completions_display] - - complete_while_typing = env.get("UPDATE_COMPLETIONS_ON_KEYPRESS") - if complete_while_typing: - # PTK requires history search to be none when completing while typing - enable_history_search = False - if HAS_PYGMENTS: - self.styler.style_name = env.get("XONSH_COLOR_STYLE") - completer = None if completions_display == "none" else self.pt_completer - - if env.get("UPDATE_PROMPT_ON_KEYPRESS"): - get_prompt_tokens = self.prompt_tokens - get_rprompt_tokens = self.rprompt_tokens - get_bottom_toolbar_tokens = self.bottom_toolbar_tokens - else: - get_prompt_tokens = self.prompt_tokens() - get_rprompt_tokens = self.rprompt_tokens() - get_bottom_toolbar_tokens = self.bottom_toolbar_tokens() - - if env.get("VI_MODE"): - editing_mode = EditingMode.VI - else: - editing_mode = EditingMode.EMACS - - if env.get("XONSH_HISTORY_MATCH_ANYWHERE"): - self.prompter.default_buffer._history_matches = MethodType( - _cust_history_matches, self.prompter.default_buffer - ) - elif ( - self.prompter.default_buffer._history_matches - is not self._history_matches_orig - ): - self.prompter.default_buffer._history_matches = self._history_matches_orig - - prompt_args = { - "mouse_support": mouse_support, - "auto_suggest": auto_suggest, - "message": get_prompt_tokens, - "rprompt": get_rprompt_tokens, - "bottom_toolbar": get_bottom_toolbar_tokens, - "completer": completer, - "multiline": multiline, - "editing_mode": editing_mode, - "prompt_continuation": self.continuation_tokens, - "enable_history_search": enable_history_search, - "reserve_space_for_menu": 10, - "key_bindings": self.key_bindings, - "complete_style": complete_style, - "complete_while_typing": complete_while_typing, - "include_default_pygments_style": False, - } - if builtins.__xonsh__.env.get("COLOR_INPUT"): - style = style_from_pygments_dict(DEFAULT_STYLE_DICT) - - prompt_args["style"] = style - - style_overrides_env = env.get("PTK_STYLE_OVERRIDES") - if style_overrides_env: - try: - style_overrides = Style.from_dict(style_overrides_env) - prompt_args["style"] = merge_styles([style, style_overrides]) - except (AttributeError, TypeError, ValueError): - print_exception() - - line = self.prompter.prompt(**prompt_args) - events.on_post_prompt.fire() - return line - - def _push(self, line): - 
"""Pushes a line onto the buffer and compiles the code in a way that - enables multiline input. - """ - code = None - self.buffer.append(line) - if self.need_more_lines: - return None, code - src = "".join(self.buffer) - src = transform_command(src) - try: - code = self.execer.compile(src, mode="single", glbs=self.ctx, locs=None) - self.reset_buffer() - except Exception: # pylint: disable=broad-except - self.reset_buffer() - print_exception() - return src, None - return src, code - - def cmdloop(self, intro=None): - """Enters a loop that reads and execute input from user.""" - if intro: - print(intro) - auto_suggest = AutoSuggestFromHistory() - self.push = self._push - while not builtins.__xonsh__.exit: - try: - line = self.singleline(auto_suggest=auto_suggest) - if not line: - self.emptyline() - else: - line = self.precmd(line) - self.default(line) - except (KeyboardInterrupt, SystemExit): - self.reset_buffer() - except EOFError: - if builtins.__xonsh__.env.get("IGNOREEOF"): - print('Use "exit" to leave the shell.', file=sys.stderr) - else: - break - - def prompt_tokens(self): - """Returns a list of (token, str) tuples for the current prompt.""" - p = builtins.__xonsh__.env.get("PROMPT") - try: - p = self.prompt_formatter(p) - except Exception: # pylint: disable=broad-except - print_exception() - toks = partial_color_tokenize(p) - if self._first_prompt: - carriage_return() - self._first_prompt = False - self.settitle() - return PygmentsTokens(toks) - - def rprompt_tokens(self): - """Returns a list of (token, str) tuples for the current right - prompt. - """ - p = builtins.__xonsh__.env.get("RIGHT_PROMPT") - # self.prompt_formatter does handle empty strings properly, - # but this avoids descending into it in the common case of - # $RIGHT_PROMPT == ''. - if isinstance(p, str) and len(p) == 0: - return [] - try: - p = self.prompt_formatter(p) - except Exception: # pylint: disable=broad-except - print_exception() - toks = partial_color_tokenize(p) - return PygmentsTokens(toks) - - def bottom_toolbar_tokens(self): - """Returns a list of (token, str) tuples for the current bottom - toolbar. - """ - p = builtins.__xonsh__.env.get("BOTTOM_TOOLBAR") - if not p: - return - try: - p = self.prompt_formatter(p) - except Exception: # pylint: disable=broad-except - print_exception() - toks = partial_color_tokenize(p) - return PygmentsTokens(toks) - - def continuation_tokens(self, width, line_number, is_soft_wrap=False): - """Displays dots in multiline prompt""" - if is_soft_wrap: - return "" - width = width - 1 - dots = builtins.__xonsh__.env.get("MULTILINE_PROMPT") - dots = dots() if callable(dots) else dots - if dots is None: - return [(Token, " " * (width + 1))] - basetoks = self.format_color(dots) - baselen = sum(len(t[1]) for t in basetoks) - if baselen == 0: - return [(Token, " " * (width + 1))] - toks = basetoks * (width // baselen) - n = width % baselen - count = 0 - for tok in basetoks: - slen = len(tok[1]) - newcount = slen + count - if slen == 0: - continue - elif newcount <= n: - toks.append(tok) - else: - toks.append((tok[0], tok[1][: n - count])) - count = newcount - if n <= count: - break - toks.append((Token, " ")) # final space - return PygmentsTokens(toks) - - def format_color(self, string, hide=False, force_string=False, **kwargs): - """Formats a color string using Pygments. This, therefore, returns - a list of (Token, str) tuples. If force_string is set to true, though, - this will return a color formatted string. 
- """ - tokens = partial_color_tokenize(string) - if force_string and HAS_PYGMENTS: - env = builtins.__xonsh__.env - self.styler.style_name = env.get("XONSH_COLOR_STYLE") - proxy_style = pyghooks.xonsh_style_proxy(self.styler) - formatter = pyghooks.XonshTerminal256Formatter(style=proxy_style) - s = pygments.format(tokens, formatter) - return s - elif force_string: - print("To force colorization of string, install Pygments") - return tokens - else: - return tokens - - def print_color(self, string, end="\n", **kwargs): - """Prints a color string using prompt-toolkit color management.""" - if isinstance(string, str): - tokens = partial_color_tokenize(string) - else: - # assume this is a list of (Token, str) tuples and just print - tokens = string - tokens = PygmentsTokens(tokens) - if HAS_PYGMENTS: - env = builtins.__xonsh__.env - self.styler.style_name = env.get("XONSH_COLOR_STYLE") - proxy_style = style_from_pygments_cls( - pyghooks.xonsh_style_proxy(self.styler) - ) - else: - proxy_style = style_from_pygments_dict(DEFAULT_STYLE_DICT) - ptk_print( - tokens, style=proxy_style, end=end, include_default_pygments_style=False - ) - - def color_style_names(self): - """Returns an iterable of all available style names.""" - if not HAS_PYGMENTS: - return ["For other xonsh styles, please install pygments"] - return get_all_styles() - - def color_style(self): - """Returns the current color map.""" - if not HAS_PYGMENTS: - return DEFAULT_STYLE_DICT - env = builtins.__xonsh__.env - self.styler.style_name = env.get("XONSH_COLOR_STYLE") - return self.styler.styles - - def restore_tty_sanity(self): - """An interface for resetting the TTY stdin mode. This is highly - dependent on the shell backend. Also it is mostly optional since - it only affects ^Z backgrounding behaviour. - """ - # PTK does not seem to need any specialization here. However, - # if it does for some reason in the future... - # The following writes an ANSI escape sequence that sends the cursor - # to the end of the line. This has the effect of restoring ECHO mode. - # See http://unix.stackexchange.com/a/108014/129048 for more details. - # This line can also be replaced by os.system("stty sane"), as per - # http://stackoverflow.com/questions/19777129/interactive-python-interpreter-run-in-background#comment29421919_19778355 - # However, it is important to note that not termios-based solution - # seems to work. My guess is that this is because termios restoration - # needs to be performed by the subprocess itself. This fix is important - # when subprocesses don't properly restore the terminal attributes, - # like Python in interactive mode. Also note that the sequences "\033M" - # and "\033E" seem to work too, but these are technically VT100 codes. - # I used the more primitive ANSI sequence to maximize compatibility. 
- # -scopatz 2017-01-28 - # if not ON_POSIX: - # return - # sys.stdout.write('\033[9999999C\n') diff --git a/xonsh/pyghooks.py b/xonsh/pyghooks.py index 7fcf99f..09f8e3a 100644 --- a/xonsh/pyghooks.py +++ b/xonsh/pyghooks.py @@ -1,1456 +1,67 @@ -# -*- coding: utf-8 -*- """Hooks for pygments syntax highlighting.""" -import os -import re -import sys -import builtins -from collections import ChainMap -from collections.abc import MutableMapping - -from pygments.lexer import inherit, bygroups, include +from pygments.lexer import inherit, bygroups, using, this +from pygments.token import Name, Generic, Keyword, Text, String +from pygments.lexers.shell import BashLexer from pygments.lexers.agile import PythonLexer -from pygments.token import ( - Keyword, - Name, - Comment, - String, - Error, - Number, - Operator, - Generic, - Whitespace, - Token, - Punctuation, - Text, -) -from pygments.style import Style -import pygments.util - -from xonsh.commands_cache import CommandsCache -from xonsh.lazyasd import LazyObject, LazyDict, lazyobject -from xonsh.tools import ( - ON_WINDOWS, - intensify_colors_for_cmd_exe, - ansicolors_to_ptk1_names, - ANSICOLOR_NAMES_MAP, - PTK_NEW_OLD_COLOR_MAP, - hardcode_colors_for_win10, - FORMATTER, -) - -from xonsh.color_tools import ( - RE_BACKGROUND, - BASE_XONSH_COLORS, - make_palette, - find_closest_color, -) -from xonsh.style_tools import norm_name -from xonsh.lazyimps import terminal256 -from xonsh.platform import ( - os_environ, - win_ansi_support, - ptk_version_info, - pygments_version_info, -) - -from xonsh.pygments_cache import get_style_by_name - -def _command_is_valid(cmd): - try: - cmd_abspath = os.path.abspath(os.path.expanduser(cmd)) - except (FileNotFoundError, OSError): - return False - return cmd in builtins.__xonsh__.commands_cache or ( - os.path.isfile(cmd_abspath) and os.access(cmd_abspath, os.X_OK) - ) +class XonshSubprocLexer(BashLexer): + """Lexer for xonsh subproc mode.""" -def _command_is_autocd(cmd): - if not builtins.__xonsh__.env.get("AUTO_CD", False): - return False - try: - cmd_abspath = os.path.abspath(os.path.expanduser(cmd)) - except (FileNotFoundError, OSError): - return False - return os.path.isdir(cmd_abspath) + name = 'Xonsh subprocess lexer' + tokens = {'root': [(r'`[^`]*?`', String.Backtick), inherit, ]} -def subproc_cmd_callback(_, match): - """Yield Builtin token if match contains valid command, - otherwise fallback to fallback lexer. 
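- - For example, in $(ls -l) the command word ls is yielded as Name.Builtin - when it resolves to a real command, and as Error when it does not.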
- """ - cmd = match.group() - yield match.start(), Name.Builtin if _command_is_valid(cmd) else Error, cmd +ROOT_TOKENS = [(r'\?', Keyword), + (r'\$\w+', Name.Variable), + (r'\$\{', Keyword, ('pymode', )), + (r'\$\(', Keyword, ('subproc', )), + (r'\$\[', Keyword, ('subproc', )), + (r'@\(', Keyword, ('pymode', )), + inherit, ] -def subproc_arg_callback(_, match): - """Check if match contains valid path""" - text = match.group() - try: - ispath = os.path.exists(os.path.expanduser(text)) - except (FileNotFoundError, OSError): - ispath = False - yield (match.start(), Name.Constant if ispath else Text, text) +PYMODE_TOKENS = [(r'(.+)(\))', bygroups(using(this), Keyword), '#pop'), + (r'(.+)(\})', bygroups(using(this), Keyword), '#pop'), ] - -COMMAND_TOKEN_RE = r'[^=\s\[\]{}()$"\'`<&|;!]+(?=\s|$|\)|\]|\}|!)' +SUBPROC_TOKENS = [ + (r'(.+)(\))', bygroups(using(XonshSubprocLexer), Keyword), '#pop'), + (r'(.+)(\])', bygroups(using(XonshSubprocLexer), Keyword), '#pop'), +] class XonshLexer(PythonLexer): """Xonsh console lexer for pygments.""" - name = "Xonsh lexer" - aliases = ["xonsh", "xsh"] - filenames = ["*.xsh", "*xonshrc"] - - def __init__(self, *args, **kwargs): - # If the lexer is loaded as a pygment plugin, we have to mock - # __xonsh__.env and __xonsh__.commands_cache - if not hasattr(builtins, "__xonsh__"): - from argparse import Namespace - - setattr(builtins, "__xonsh__", Namespace()) - if not hasattr(builtins.__xonsh__, "env"): - setattr(builtins.__xonsh__, "env", {}) - if ON_WINDOWS: - pathext = os_environ.get("PATHEXT", [".EXE", ".BAT", ".CMD"]) - builtins.__xonsh__.env["PATHEXT"] = pathext.split(os.pathsep) - if not hasattr(builtins.__xonsh__, "commands_cache"): - setattr(builtins.__xonsh__, "commands_cache", CommandsCache()) - _ = builtins.__xonsh__.commands_cache.all_commands # NOQA - super().__init__(*args, **kwargs) + name = 'Xonsh lexer' + aliases = ['xonsh', 'xsh'] + filenames = ['*.xsh', '*xonshrc'] tokens = { - "mode_switch_brackets": [ - (r"(\$)(\{)", bygroups(Keyword, Punctuation), "py_curly_bracket"), - (r"(@)(\()", bygroups(Keyword, Punctuation), "py_bracket"), - ( - r"([\!\$])(\()", - bygroups(Keyword, Punctuation), - ("subproc_bracket", "subproc_start"), - ), - ( - r"(@\$)(\()", - bygroups(Keyword, Punctuation), - ("subproc_bracket", "subproc_start"), - ), - ( - r"([\!\$])(\[)", - bygroups(Keyword, Punctuation), - ("subproc_square_bracket", "subproc_start"), - ), - (r"(g?)(`)", bygroups(String.Affix, String.Backtick), "backtick_re"), - ], - "subproc_bracket": [(r"\)", Punctuation, "#pop"), include("subproc")], - "subproc_square_bracket": [(r"\]", Punctuation, "#pop"), include("subproc")], - "py_bracket": [(r"\)", Punctuation, "#pop"), include("root")], - "py_curly_bracket": [(r"\}", Punctuation, "#pop"), include("root")], - "backtick_re": [ - (r"[\.\^\$\*\+\?\[\]\|]", String.Regex), - (r"({[0-9]+}|{[0-9]+,[0-9]+})\??", String.Regex), - (r"\\([0-9]+|[AbBdDsSwWZabfnrtuUvx\\])", String.Escape), - (r"`", String.Backtick, "#pop"), - (r"[^`\.\^\$\*\+\?\[\]\|]+", String.Backtick), - ], - "root": [ - (r"\?", Keyword), - (r"(?<=\w)!", Keyword), - (r"\$\w+", Name.Variable), - (r"\(", Punctuation, "py_bracket"), - (r"\{", Punctuation, "py_curly_bracket"), - include("mode_switch_brackets"), - inherit, - ], - "subproc_start": [ - (r"\s+", Whitespace), - (COMMAND_TOKEN_RE, subproc_cmd_callback, "#pop"), - (r"", Whitespace, "#pop"), - ], - "subproc": [ - include("mode_switch_brackets"), - (r"&&|\|\|", Operator, "subproc_start"), - (r'"(\\\\|\\[0-7]+|\\.|[^"\\])*"', String.Double), - 
(r"'(\\\\|\\[0-7]+|\\.|[^'\\])*'", String.Single), - (r"(?<=\w|\s)!", Keyword, "subproc_macro"), - (r"^!", Keyword, "subproc_macro"), - (r";", Punctuation, "subproc_start"), - (r"&|=", Punctuation), - (r"\|", Punctuation, "subproc_start"), - (r"\s+", Text), - (r'[^=\s\[\]{}()$"\'`<&|;]+', subproc_arg_callback), - (r"<", Text), - (r"\$\w+", Name.Variable), - ], - "subproc_macro": [ - (r"(\s*)([^\n]+)", bygroups(Whitespace, String)), - (r"", Whitespace, "#pop"), - ], + 'root': list(ROOT_TOKENS), + 'pymode': PYMODE_TOKENS, + 'subproc': SUBPROC_TOKENS, } - def get_tokens_unprocessed(self, text): - """Check first command, then call super.get_tokens_unprocessed - with root or subproc state""" - start = 0 - state = ("root",) - m = re.match(r"(\s*)({})".format(COMMAND_TOKEN_RE), text) - if m is not None: - yield m.start(1), Whitespace, m.group(1) - cmd = m.group(2) - cmd_is_valid = _command_is_valid(cmd) - cmd_is_autocd = _command_is_autocd(cmd) - - if cmd_is_valid or cmd_is_autocd: - yield (m.start(2), Name.Builtin if cmd_is_valid else Name.Constant, cmd) - start = m.end(2) - state = ("subproc",) - - for i, t, v in super().get_tokens_unprocessed(text[start:], state): - yield i + start, t, v - -class XonshConsoleLexer(XonshLexer): +class XonshConsoleLexer(PythonLexer): """Xonsh console lexer for pygments.""" - name = "Xonsh console lexer" - aliases = ["xonshcon"] + name = 'Xonsh console lexer' + aliases = ['xonshcon'] filenames = [] tokens = { - "root": [ - (r"^(>>>|\.\.\.) ", Generic.Prompt), - (r"\n(>>>|\.\.\.)", Generic.Prompt), - (r"\n(?![>.][>.][>.] )([^\n]*)", Generic.Output), - (r"\n(?![>.][>.][>.] )(.*?)$", Generic.Output), - inherit, - ] - } - - -# -# Colors and Styles -# - -Color = Token.Color # alias to new color token namespace - - -def color_by_name(name, fg=None, bg=None): - """Converts a color name to a color token, foreground name, - and background name. Will take into consideration current foreground - and background colors, if provided. - - Parameters - ---------- - name : str - Color name. - fg : str, optional - Foreground color name. - bg : str, optional - Background color name. - - Returns - ------- - tok : Token - Pygments Token.Color subclass - fg : str or None - New computed foreground color name. - bg : str or None - New computed background color name. - """ - name = name.upper() - if name == "NO_COLOR": - return Color.NO_COLOR, None, None - m = RE_BACKGROUND.search(name) - if m is None: # must be foreground color - fg = norm_name(name) - else: - bg = norm_name(name) - # assemble token - if fg is None and bg is None: - tokname = "NO_COLOR" - elif fg is None: - tokname = bg - elif bg is None: - tokname = fg - else: - tokname = fg + "__" + bg - tok = getattr(Color, tokname) - return tok, fg, bg - - -def code_by_name(name, styles): - """Converts a token name into a pygments-style color code. - - Parameters - ---------- - name : str - Color token name. - styles : Mapping - Mapping for looking up non-hex colors - - Returns - ------- - code : str - Pygments style color code. 
- """ - fg, _, bg = name.lower().partition("__") - if fg.startswith("background_"): - fg, bg = bg, fg - codes = [] - # foreground color - if len(fg) == 0: - pass - elif "hex" in fg: - for p in fg.split("_"): - codes.append("#" + p[3:] if p.startswith("hex") else p) - else: - fgtok = getattr(Color, fg.upper()) - if fgtok in styles: - codes.append(styles[fgtok]) - else: - codes += fg.split("_") - # background color - if len(bg) == 0: - pass - elif bg.startswith("background_hex"): - codes.append("bg:#" + bg[14:]) - else: - bgtok = getattr(Color, bg.upper()) - if bgtok in styles: - codes.append(styles[bgtok]) - else: - codes.append(bg.replace("background_", "bg:")) - code = " ".join(codes) - return code - - -def partial_color_tokenize(template): - """Tokenizes a template string containing colors. Will return a list - of tuples mapping the token to the string which has that color. - These sub-strings maybe templates themselves. - """ - if builtins.__xonsh__.shell is not None: - styles = __xonsh__.shell.shell.styler.styles - else: - styles = None - color = Color.NO_COLOR - try: - toks, color = _partial_color_tokenize_main(template, styles) - except Exception: - toks = [(Color.NO_COLOR, template)] - if styles is not None: - styles[color] # ensure color is available - return toks - - -def _partial_color_tokenize_main(template, styles): - bopen = "{" - bclose = "}" - colon = ":" - expl = "!" - color = Color.NO_COLOR - fg = bg = None - value = "" - toks = [] - for literal, field, spec, conv in FORMATTER.parse(template): - if field is None: - value += literal - elif field in KNOWN_COLORS or "#" in field: - value += literal - next_color, fg, bg = color_by_name(field, fg, bg) - if next_color is not color: - if len(value) > 0: - toks.append((color, value)) - if styles is not None: - styles[color] # ensure color is available - color = next_color - value = "" - elif field is not None: - parts = [literal, bopen, field] - if conv is not None and len(conv) > 0: - parts.append(expl) - parts.append(conv) - if spec is not None and len(spec) > 0: - parts.append(colon) - parts.append(spec) - parts.append(bclose) - value += "".join(parts) - else: - value += literal - toks.append((color, value)) - return toks, color - - -class CompoundColorMap(MutableMapping): - """Looks up color tokens by name, potentially generating the value - from the lookup. - """ - - def __init__(self, styles, *args, **kwargs): - self.styles = styles - self.colors = dict(*args, **kwargs) - - def __getitem__(self, key): - if key in self.colors: - return self.colors[key] - if key in self.styles: - value = self.styles[key] - self[key] = value - return value - if key is Color: - raise KeyError - pre, _, name = str(key).rpartition(".") - if pre != "Token.Color": - raise KeyError - value = code_by_name(name, self.styles) - self[key] = value - return value - - def __setitem__(self, key, value): - self.colors[key] = value - - def __delitem__(self, key): - del self.colors[key] - - def __iter__(self): - yield from self.colors.keys() - - def __len__(self): - return len(self.colors) - - -class XonshStyle(Style): - """A xonsh pygments style that will dispatch to the correct color map - by using a ChainMap. The style_name property may be used to reset - the current style. - """ - - def __init__(self, style_name="default"): - """ - Parameters - ---------- - style_name : str, optional - The style name to initialize with. 
- """ - self.trap = {} # for trapping custom colors set by user - self._smap = {} - self._style_name = "" - self.style_name = style_name - super().__init__() - - @property - def style_name(self): - return self._style_name - - @style_name.setter - def style_name(self, value): - if self._style_name == value: - return - if value not in STYLES: - try: # loading style dynamically - pygments_style_by_name(value) - except Exception: - print( - "Could not find style {0!r}, using default".format(value), - file=sys.stderr, - ) - value = "default" - builtins.__xonsh__.env["XONSH_COLOR_STYLE"] = value - cmap = STYLES[value] - if value == "default": - self._smap = XONSH_BASE_STYLE.copy() - else: - try: - self._smap = get_style_by_name(value)().styles.copy() - except (ImportError, pygments.util.ClassNotFound): - self._smap = XONSH_BASE_STYLE.copy() - compound = CompoundColorMap(ChainMap(self.trap, cmap, PTK_STYLE, self._smap)) - self.styles = ChainMap(self.trap, cmap, PTK_STYLE, self._smap, compound) - self._style_name = value - # Convert new ansicolor names to old PTK1 names - # Can be remvoed when PTK1 support is dropped. - if builtins.__xonsh__.shell.shell_type != "prompt_toolkit2": - for smap in [self.trap, cmap, PTK_STYLE, self._smap]: - smap.update(ansicolors_to_ptk1_names(smap)) - if ON_WINDOWS and "prompt_toolkit" in builtins.__xonsh__.shell.shell_type: - self.enhance_colors_for_cmd_exe() - - @style_name.deleter - def style_name(self): - self._style_name = "" - - def enhance_colors_for_cmd_exe(self): - """ Enhance colors when using cmd.exe on windows. - When using the default style all blue and dark red colors - are changed to CYAN and intense red. - """ - env = builtins.__xonsh__.env - # Ensure we are not using ConEmu or Visual Stuio Code - if "CONEMUANSI" in env or "VSCODE_PID" in env: - return - if env.get("INTENSIFY_COLORS_ON_WIN", False): - if win_ansi_support(): - newcolors = hardcode_colors_for_win10(self.styles) - else: - newcolors = intensify_colors_for_cmd_exe(self.styles) - self.trap.update(newcolors) - - -def xonsh_style_proxy(styler): - """Factory for a proxy class to a xonsh style.""" - # Monky patch pygments' list of known ansi colors - # with the new ansi color names used by PTK2 - # Can be removed once pygment names get fixed. 
- pygments.style.ansicolors.update(ANSICOLOR_NAMES_MAP) - - class XonshStyleProxy(Style): - """Simple proxy class to fool prompt toolkit.""" - - target = styler - styles = styler.styles - - def __new__(cls, *args, **kwargs): - return cls.target - - return XonshStyleProxy - - -PTK_STYLE = LazyObject( - lambda: { - Token.Menu.Completions: "bg:ansigray ansiblack", - Token.Menu.Completions.Completion: "", - Token.Menu.Completions.Completion.Current: "bg:ansibrightblack ansiwhite", - Token.Scrollbar: "bg:ansibrightblack", - Token.Scrollbar.Button: "bg:ansiblack", - Token.Scrollbar.Arrow: "bg:ansiblack ansiwhite bold", - Token.AutoSuggestion: "ansibrightblack", - Token.Aborted: "ansibrightblack", - }, - globals(), - "PTK_STYLE", -) - - -XONSH_BASE_STYLE = LazyObject( - lambda: { - Whitespace: "ansigray", - Comment: "underline ansicyan", - Comment.Preproc: "underline ansiyellow", - Keyword: "bold ansigreen", - Keyword.Pseudo: "nobold", - Keyword.Type: "nobold ansired", - Operator: "ansibrightblack", - Operator.Word: "bold ansimagenta", - Name.Builtin: "ansigreen", - Name.Function: "ansibrightblue", - Name.Class: "bold ansibrightblue", - Name.Namespace: "bold ansibrightblue", - Name.Exception: "bold ansibrightred", - Name.Variable: "ansiblue", - Name.Constant: "ansired", - Name.Label: "ansibrightyellow", - Name.Entity: "bold ansigray", - Name.Attribute: "ansibrightyellow", - Name.Tag: "bold ansigreen", - Name.Decorator: "ansibrightmagenta", - String: "ansibrightred", - String.Doc: "underline", - String.Interpol: "bold ansimagenta", - String.Escape: "bold ansiyellow", - String.Regex: "ansimagenta", - String.Symbol: "ansiyellow", - String.Other: "ansigreen", - Number: "ansibrightblack", - Generic.Heading: "bold ansiblue", - Generic.Subheading: "bold ansimagenta", - Generic.Deleted: "ansired", - Generic.Inserted: "ansibrightgreen", - Generic.Error: "bold ansibrightred", - Generic.Emph: "underline", - Generic.Prompt: "bold ansiblue", - Generic.Output: "ansiblue", - Generic.Traceback: "ansiblue", - Error: "ansibrightred", - }, - globals(), - "XONSH_BASE_STYLE", -) - - -KNOWN_COLORS = LazyObject( - lambda: frozenset( - [ - "BACKGROUND_BLACK", - "BACKGROUND_BLUE", - "BACKGROUND_CYAN", - "BACKGROUND_GREEN", - "BACKGROUND_INTENSE_BLACK", - "BACKGROUND_INTENSE_BLUE", - "BACKGROUND_INTENSE_CYAN", - "BACKGROUND_INTENSE_GREEN", - "BACKGROUND_INTENSE_PURPLE", - "BACKGROUND_INTENSE_RED", - "BACKGROUND_INTENSE_WHITE", - "BACKGROUND_INTENSE_YELLOW", - "BACKGROUND_PURPLE", - "BACKGROUND_RED", - "BACKGROUND_WHITE", - "BACKGROUND_YELLOW", - "BLACK", - "BLUE", - "BOLD_BLACK", - "BOLD_BLUE", - "BOLD_CYAN", - "BOLD_GREEN", - "BOLD_INTENSE_BLACK", - "BOLD_INTENSE_BLUE", - "BOLD_INTENSE_CYAN", - "BOLD_INTENSE_GREEN", - "BOLD_INTENSE_PURPLE", - "BOLD_INTENSE_RED", - "BOLD_INTENSE_WHITE", - "BOLD_INTENSE_YELLOW", - "BOLD_PURPLE", - "BOLD_RED", - "BOLD_UNDERLINE_BLACK", - "BOLD_UNDERLINE_BLUE", - "BOLD_UNDERLINE_CYAN", - "BOLD_UNDERLINE_GREEN", - "BOLD_UNDERLINE_INTENSE_BLACK", - "BOLD_UNDERLINE_INTENSE_BLUE", - "BOLD_UNDERLINE_INTENSE_CYAN", - "BOLD_UNDERLINE_INTENSE_GREEN", - "BOLD_UNDERLINE_INTENSE_PURPLE", - "BOLD_UNDERLINE_INTENSE_RED", - "BOLD_UNDERLINE_INTENSE_WHITE", - "BOLD_UNDERLINE_INTENSE_YELLOW", - "BOLD_UNDERLINE_PURPLE", - "BOLD_UNDERLINE_RED", - "BOLD_UNDERLINE_WHITE", - "BOLD_UNDERLINE_YELLOW", - "BOLD_WHITE", - "BOLD_YELLOW", - "CYAN", - "GREEN", - "INTENSE_BLACK", - "INTENSE_BLUE", - "INTENSE_CYAN", - "INTENSE_GREEN", - "INTENSE_PURPLE", - "INTENSE_RED", - "INTENSE_WHITE", - "INTENSE_YELLOW", - "NO_COLOR", 
- "PURPLE", - "RED", - "UNDERLINE_BLACK", - "UNDERLINE_BLUE", - "UNDERLINE_CYAN", - "UNDERLINE_GREEN", - "UNDERLINE_INTENSE_BLACK", - "UNDERLINE_INTENSE_BLUE", - "UNDERLINE_INTENSE_CYAN", - "UNDERLINE_INTENSE_GREEN", - "UNDERLINE_INTENSE_PURPLE", - "UNDERLINE_INTENSE_RED", - "UNDERLINE_INTENSE_WHITE", - "UNDERLINE_INTENSE_YELLOW", - "UNDERLINE_PURPLE", - "UNDERLINE_RED", - "UNDERLINE_WHITE", - "UNDERLINE_YELLOW", - "WHITE", - "YELLOW", - ] - ), - globals(), - "KNOWN_COLORS", -) - - -def _expand_style(cmap): - """Expands a style in order to more quickly make color map changes.""" - for key, val in list(cmap.items()): - if key is Color.NO_COLOR: - continue - _, _, key = str(key).rpartition(".") - cmap[getattr(Color, "BOLD_" + key)] = "bold " + val - cmap[getattr(Color, "UNDERLINE_" + key)] = "underline " + val - cmap[getattr(Color, "BOLD_UNDERLINE_" + key)] = "bold underline " + val - if val == "noinherit": - cmap[getattr(Color, "BACKGROUND_" + key)] = val - else: - cmap[getattr(Color, "BACKGROUND_" + key)] = "bg:" + val - - -def _bw_style(): - style = { - Color.BLACK: "noinherit", - Color.BLUE: "noinherit", - Color.CYAN: "noinherit", - Color.GREEN: "noinherit", - Color.INTENSE_BLACK: "noinherit", - Color.INTENSE_BLUE: "noinherit", - Color.INTENSE_CYAN: "noinherit", - Color.INTENSE_GREEN: "noinherit", - Color.INTENSE_PURPLE: "noinherit", - Color.INTENSE_RED: "noinherit", - Color.INTENSE_WHITE: "noinherit", - Color.INTENSE_YELLOW: "noinherit", - Color.NO_COLOR: "noinherit", - Color.PURPLE: "noinherit", - Color.RED: "noinherit", - Color.WHITE: "noinherit", - Color.YELLOW: "noinherit", - } - _expand_style(style) - return style - - -def _default_style(): - style = { - Color.BLACK: "ansiblack", - Color.BLUE: "ansiblue", - Color.CYAN: "ansicyan", - Color.GREEN: "ansigreen", - Color.INTENSE_BLACK: "ansibrightblack", - Color.INTENSE_BLUE: "ansibrightblue", - Color.INTENSE_CYAN: "ansibrightcyan", - Color.INTENSE_GREEN: "ansibrightgreen", - Color.INTENSE_PURPLE: "ansibrightmagenta", - Color.INTENSE_RED: "ansibrightred", - Color.INTENSE_WHITE: "ansiwhite", - Color.INTENSE_YELLOW: "ansibrightyellow", - Color.NO_COLOR: "noinherit", - Color.PURPLE: "ansimagenta", - Color.RED: "ansired", - Color.WHITE: "ansigray", - Color.YELLOW: "ansiyellow", - } - _expand_style(style) - return style - - -def _monokai_style(): - style = { - Color.BLACK: "#1e0010", - Color.BLUE: "#6666ef", - Color.CYAN: "#66d9ef", - Color.GREEN: "#2ee22e", - Color.INTENSE_BLACK: "#5e5e5e", - Color.INTENSE_BLUE: "#2626d7", - Color.INTENSE_CYAN: "#2ed9d9", - Color.INTENSE_GREEN: "#a6e22e", - Color.INTENSE_PURPLE: "#ae81ff", - Color.INTENSE_RED: "#f92672", - Color.INTENSE_WHITE: "#f8f8f2", - Color.INTENSE_YELLOW: "#e6db74", - Color.NO_COLOR: "noinherit", - Color.PURPLE: "#960050", - Color.RED: "#AF0000", - Color.WHITE: "#d7d7d7", - Color.YELLOW: "#e2e22e", - } - _expand_style(style) - return style - - -###################################### -# Auto-generated below this line # -###################################### -def _algol_style(): - style = { - Color.BLACK: "#666", - Color.BLUE: "#666", - Color.CYAN: "#666", - Color.GREEN: "#666", - Color.INTENSE_BLACK: "#666", - Color.INTENSE_BLUE: "#888", - Color.INTENSE_CYAN: "#888", - Color.INTENSE_GREEN: "#888", - Color.INTENSE_PURPLE: "#888", - Color.INTENSE_RED: "#FF0000", - Color.INTENSE_WHITE: "#888", - Color.INTENSE_YELLOW: "#888", - Color.NO_COLOR: "noinherit", - Color.PURPLE: "#666", - Color.RED: "#FF0000", - Color.WHITE: "#888", - Color.YELLOW: "#FF0000", - } - _expand_style(style) - return 
style - - -def _algol_nu_style(): - style = { - Color.BLACK: "#666", - Color.BLUE: "#666", - Color.CYAN: "#666", - Color.GREEN: "#666", - Color.INTENSE_BLACK: "#666", - Color.INTENSE_BLUE: "#888", - Color.INTENSE_CYAN: "#888", - Color.INTENSE_GREEN: "#888", - Color.INTENSE_PURPLE: "#888", - Color.INTENSE_RED: "#FF0000", - Color.INTENSE_WHITE: "#888", - Color.INTENSE_YELLOW: "#888", - Color.NO_COLOR: "noinherit", - Color.PURPLE: "#666", - Color.RED: "#FF0000", - Color.WHITE: "#888", - Color.YELLOW: "#FF0000", - } - _expand_style(style) - return style - - -def _autumn_style(): - style = { - Color.BLACK: "#000080", - Color.BLUE: "#0000aa", - Color.CYAN: "#00aaaa", - Color.GREEN: "#00aa00", - Color.INTENSE_BLACK: "#555555", - Color.INTENSE_BLUE: "#1e90ff", - Color.INTENSE_CYAN: "#1e90ff", - Color.INTENSE_GREEN: "#4c8317", - Color.INTENSE_PURPLE: "#FAA", - Color.INTENSE_RED: "#aa5500", - Color.INTENSE_WHITE: "#bbbbbb", - Color.INTENSE_YELLOW: "#FAA", - Color.NO_COLOR: "noinherit", - Color.PURPLE: "#800080", - Color.RED: "#aa0000", - Color.WHITE: "#aaaaaa", - Color.YELLOW: "#aa5500", - } - _expand_style(style) - return style - - -def _borland_style(): - style = { - Color.BLACK: "#000000", - Color.BLUE: "#000080", - Color.CYAN: "#008080", - Color.GREEN: "#008800", - Color.INTENSE_BLACK: "#555555", - Color.INTENSE_BLUE: "#0000FF", - Color.INTENSE_CYAN: "#ddffdd", - Color.INTENSE_GREEN: "#888888", - Color.INTENSE_PURPLE: "#e3d2d2", - Color.INTENSE_RED: "#FF0000", - Color.INTENSE_WHITE: "#ffdddd", - Color.INTENSE_YELLOW: "#e3d2d2", - Color.NO_COLOR: "noinherit", - Color.PURPLE: "#800080", - Color.RED: "#aa0000", - Color.WHITE: "#aaaaaa", - Color.YELLOW: "#a61717", - } - _expand_style(style) - return style - - -def _colorful_style(): - style = { - Color.BLACK: "#000", - Color.BLUE: "#00C", - Color.CYAN: "#0e84b5", - Color.GREEN: "#00A000", - Color.INTENSE_BLACK: "#555", - Color.INTENSE_BLUE: "#33B", - Color.INTENSE_CYAN: "#bbbbbb", - Color.INTENSE_GREEN: "#888", - Color.INTENSE_PURPLE: "#FAA", - Color.INTENSE_RED: "#D42", - Color.INTENSE_WHITE: "#fff0ff", - Color.INTENSE_YELLOW: "#FAA", - Color.NO_COLOR: "noinherit", - Color.PURPLE: "#800080", - Color.RED: "#A00000", - Color.WHITE: "#bbbbbb", - Color.YELLOW: "#A60", - } - _expand_style(style) - return style - - -def _emacs_style(): - style = { - Color.BLACK: "#008000", - Color.BLUE: "#000080", - Color.CYAN: "#04D", - Color.GREEN: "#00A000", - Color.INTENSE_BLACK: "#666666", - Color.INTENSE_BLUE: "#04D", - Color.INTENSE_CYAN: "#bbbbbb", - Color.INTENSE_GREEN: "#00BB00", - Color.INTENSE_PURPLE: "#AA22FF", - Color.INTENSE_RED: "#D2413A", - Color.INTENSE_WHITE: "#bbbbbb", - Color.INTENSE_YELLOW: "#bbbbbb", - Color.NO_COLOR: "noinherit", - Color.PURPLE: "#800080", - Color.RED: "#A00000", - Color.WHITE: "#bbbbbb", - Color.YELLOW: "#BB6622", - } - _expand_style(style) - return style - - -def _friendly_style(): - style = { - Color.BLACK: "#007020", - Color.BLUE: "#000080", - Color.CYAN: "#0e84b5", - Color.GREEN: "#00A000", - Color.INTENSE_BLACK: "#555555", - Color.INTENSE_BLUE: "#70a0d0", - Color.INTENSE_CYAN: "#60add5", - Color.INTENSE_GREEN: "#40a070", - Color.INTENSE_PURPLE: "#bb60d5", - Color.INTENSE_RED: "#d55537", - Color.INTENSE_WHITE: "#fff0f0", - Color.INTENSE_YELLOW: "#bbbbbb", - Color.NO_COLOR: "noinherit", - Color.PURPLE: "#800080", - Color.RED: "#A00000", - Color.WHITE: "#bbbbbb", - Color.YELLOW: "#c65d09", - } - _expand_style(style) - return style - - -def _fruity_style(): - style = { - Color.BLACK: "#0f140f", - Color.BLUE: "#0086d2", - 
Color.CYAN: "#0086d2", - Color.GREEN: "#008800", - Color.INTENSE_BLACK: "#444444", - Color.INTENSE_BLUE: "#0086f7", - Color.INTENSE_CYAN: "#0086f7", - Color.INTENSE_GREEN: "#888888", - Color.INTENSE_PURPLE: "#ff0086", - Color.INTENSE_RED: "#fb660a", - Color.INTENSE_WHITE: "#ffffff", - Color.INTENSE_YELLOW: "#cdcaa9", - Color.NO_COLOR: "noinherit", - Color.PURPLE: "#ff0086", - Color.RED: "#ff0007", - Color.WHITE: "#cdcaa9", - Color.YELLOW: "#fb660a", - } - _expand_style(style) - return style - - -def _igor_style(): - style = { - Color.BLACK: "#009C00", - Color.BLUE: "#0000FF", - Color.CYAN: "#007575", - Color.GREEN: "#009C00", - Color.INTENSE_BLACK: "#007575", - Color.INTENSE_BLUE: "#0000FF", - Color.INTENSE_CYAN: "#007575", - Color.INTENSE_GREEN: "#009C00", - Color.INTENSE_PURPLE: "#CC00A3", - Color.INTENSE_RED: "#C34E00", - Color.INTENSE_WHITE: "#CC00A3", - Color.INTENSE_YELLOW: "#C34E00", - Color.NO_COLOR: "noinherit", - Color.PURPLE: "#CC00A3", - Color.RED: "#C34E00", - Color.WHITE: "#CC00A3", - Color.YELLOW: "#C34E00", - } - _expand_style(style) - return style - - -def _lovelace_style(): - style = { - Color.BLACK: "#444444", - Color.BLUE: "#2838b0", - Color.CYAN: "#289870", - Color.GREEN: "#388038", - Color.INTENSE_BLACK: "#666666", - Color.INTENSE_BLUE: "#2838b0", - Color.INTENSE_CYAN: "#888888", - Color.INTENSE_GREEN: "#289870", - Color.INTENSE_PURPLE: "#a848a8", - Color.INTENSE_RED: "#b83838", - Color.INTENSE_WHITE: "#888888", - Color.INTENSE_YELLOW: "#a89028", - Color.NO_COLOR: "noinherit", - Color.PURPLE: "#a848a8", - Color.RED: "#c02828", - Color.WHITE: "#888888", - Color.YELLOW: "#b85820", - } - _expand_style(style) - return style - - -def _manni_style(): - style = { - Color.BLACK: "#000000", - Color.BLUE: "#000099", - Color.CYAN: "#009999", - Color.GREEN: "#00CC00", - Color.INTENSE_BLACK: "#555555", - Color.INTENSE_BLUE: "#9999FF", - Color.INTENSE_CYAN: "#00CCFF", - Color.INTENSE_GREEN: "#99CC66", - Color.INTENSE_PURPLE: "#CC00FF", - Color.INTENSE_RED: "#FF6600", - Color.INTENSE_WHITE: "#FFCCCC", - Color.INTENSE_YELLOW: "#FFCC33", - Color.NO_COLOR: "noinherit", - Color.PURPLE: "#CC00FF", - Color.RED: "#AA0000", - Color.WHITE: "#AAAAAA", - Color.YELLOW: "#CC3300", - } - _expand_style(style) - return style - - -def _murphy_style(): - style = { - Color.BLACK: "#000", - Color.BLUE: "#000080", - Color.CYAN: "#0e84b5", - Color.GREEN: "#00A000", - Color.INTENSE_BLACK: "#555", - Color.INTENSE_BLUE: "#66f", - Color.INTENSE_CYAN: "#5ed", - Color.INTENSE_GREEN: "#5ed", - Color.INTENSE_PURPLE: "#e9e", - Color.INTENSE_RED: "#f84", - Color.INTENSE_WHITE: "#eee", - Color.INTENSE_YELLOW: "#fc8", - Color.NO_COLOR: "noinherit", - Color.PURPLE: "#800080", - Color.RED: "#A00000", - Color.WHITE: "#bbbbbb", - Color.YELLOW: "#c65d09", - } - _expand_style(style) - return style - - -def _native_style(): - style = { - Color.BLACK: "#520000", - Color.BLUE: "#3677a9", - Color.CYAN: "#24909d", - Color.GREEN: "#589819", - Color.INTENSE_BLACK: "#666666", - Color.INTENSE_BLUE: "#447fcf", - Color.INTENSE_CYAN: "#40ffff", - Color.INTENSE_GREEN: "#6ab825", - Color.INTENSE_PURPLE: "#e3d2d2", - Color.INTENSE_RED: "#cd2828", - Color.INTENSE_WHITE: "#ffffff", - Color.INTENSE_YELLOW: "#ed9d13", - Color.NO_COLOR: "noinherit", - Color.PURPLE: "#666666", - Color.RED: "#a61717", - Color.WHITE: "#aaaaaa", - Color.YELLOW: "#a61717", - } - _expand_style(style) - return style - - -def _paraiso_dark_style(): - style = { - Color.BLACK: "#776e71", - Color.BLUE: "#815ba4", - Color.CYAN: "#06b6ef", - Color.GREEN: "#48b685", - 
Color.INTENSE_BLACK: "#776e71", - Color.INTENSE_BLUE: "#815ba4", - Color.INTENSE_CYAN: "#5bc4bf", - Color.INTENSE_GREEN: "#48b685", - Color.INTENSE_PURPLE: "#e7e9db", - Color.INTENSE_RED: "#ef6155", - Color.INTENSE_WHITE: "#e7e9db", - Color.INTENSE_YELLOW: "#fec418", - Color.NO_COLOR: "noinherit", - Color.PURPLE: "#815ba4", - Color.RED: "#ef6155", - Color.WHITE: "#5bc4bf", - Color.YELLOW: "#f99b15", - } - _expand_style(style) - return style - - -def _paraiso_light_style(): - style = { - Color.BLACK: "#2f1e2e", - Color.BLUE: "#2f1e2e", - Color.CYAN: "#06b6ef", - Color.GREEN: "#48b685", - Color.INTENSE_BLACK: "#2f1e2e", - Color.INTENSE_BLUE: "#815ba4", - Color.INTENSE_CYAN: "#5bc4bf", - Color.INTENSE_GREEN: "#48b685", - Color.INTENSE_PURPLE: "#815ba4", - Color.INTENSE_RED: "#ef6155", - Color.INTENSE_WHITE: "#5bc4bf", - Color.INTENSE_YELLOW: "#fec418", - Color.NO_COLOR: "noinherit", - Color.PURPLE: "#815ba4", - Color.RED: "#2f1e2e", - Color.WHITE: "#8d8687", - Color.YELLOW: "#f99b15", - } - _expand_style(style) - return style - - -def _pastie_style(): - style = { - Color.BLACK: "#000000", - Color.BLUE: "#0000DD", - Color.CYAN: "#0066bb", - Color.GREEN: "#008800", - Color.INTENSE_BLACK: "#555555", - Color.INTENSE_BLUE: "#3333bb", - Color.INTENSE_CYAN: "#ddffdd", - Color.INTENSE_GREEN: "#22bb22", - Color.INTENSE_PURPLE: "#e3d2d2", - Color.INTENSE_RED: "#dd7700", - Color.INTENSE_WHITE: "#fff0ff", - Color.INTENSE_YELLOW: "#e3d2d2", - Color.NO_COLOR: "noinherit", - Color.PURPLE: "#bb0066", - Color.RED: "#aa0000", - Color.WHITE: "#bbbbbb", - Color.YELLOW: "#aa6600", - } - _expand_style(style) - return style - - -def _perldoc_style(): - style = { - Color.BLACK: "#000080", - Color.BLUE: "#000080", - Color.CYAN: "#1e889b", - Color.GREEN: "#00aa00", - Color.INTENSE_BLACK: "#555555", - Color.INTENSE_BLUE: "#B452CD", - Color.INTENSE_CYAN: "#bbbbbb", - Color.INTENSE_GREEN: "#228B22", - Color.INTENSE_PURPLE: "#B452CD", - Color.INTENSE_RED: "#CD5555", - Color.INTENSE_WHITE: "#e3d2d2", - Color.INTENSE_YELLOW: "#e3d2d2", - Color.NO_COLOR: "noinherit", - Color.PURPLE: "#8B008B", - Color.RED: "#aa0000", - Color.WHITE: "#a7a7a7", - Color.YELLOW: "#cb6c20", - } - _expand_style(style) - return style - - -def _rrt_style(): - style = { - Color.BLACK: "#ff0000", - Color.BLUE: "#87ceeb", - Color.CYAN: "#87ceeb", - Color.GREEN: "#00ff00", - Color.INTENSE_BLACK: "#87ceeb", - Color.INTENSE_BLUE: "#87ceeb", - Color.INTENSE_CYAN: "#7fffd4", - Color.INTENSE_GREEN: "#00ff00", - Color.INTENSE_PURPLE: "#ee82ee", - Color.INTENSE_RED: "#ff0000", - Color.INTENSE_WHITE: "#e5e5e5", - Color.INTENSE_YELLOW: "#eedd82", - Color.NO_COLOR: "noinherit", - Color.PURPLE: "#ee82ee", - Color.RED: "#ff0000", - Color.WHITE: "#87ceeb", - Color.YELLOW: "#ff0000", - } - _expand_style(style) - return style - - -def _tango_style(): - style = { - Color.BLACK: "#000000", - Color.BLUE: "#0000cf", - Color.CYAN: "#3465a4", - Color.GREEN: "#00A000", - Color.INTENSE_BLACK: "#204a87", - Color.INTENSE_BLUE: "#5c35cc", - Color.INTENSE_CYAN: "#f8f8f8", - Color.INTENSE_GREEN: "#4e9a06", - Color.INTENSE_PURPLE: "#f8f8f8", - Color.INTENSE_RED: "#ef2929", - Color.INTENSE_WHITE: "#f8f8f8", - Color.INTENSE_YELLOW: "#c4a000", - Color.NO_COLOR: "noinherit", - Color.PURPLE: "#800080", - Color.RED: "#a40000", - Color.WHITE: "#f8f8f8", - Color.YELLOW: "#8f5902", - } - _expand_style(style) - return style - - -def _trac_style(): - style = { - Color.BLACK: "#000000", - Color.BLUE: "#000080", - Color.CYAN: "#009999", - Color.GREEN: "#808000", - Color.INTENSE_BLACK: 
"#555555", - Color.INTENSE_BLUE: "#445588", - Color.INTENSE_CYAN: "#ddffdd", - Color.INTENSE_GREEN: "#999988", - Color.INTENSE_PURPLE: "#e3d2d2", - Color.INTENSE_RED: "#bb8844", - Color.INTENSE_WHITE: "#ffdddd", - Color.INTENSE_YELLOW: "#e3d2d2", - Color.NO_COLOR: "noinherit", - Color.PURPLE: "#800080", - Color.RED: "#aa0000", - Color.WHITE: "#aaaaaa", - Color.YELLOW: "#808000", - } - _expand_style(style) - return style - - -def _vim_style(): - style = { - Color.BLACK: "#000080", - Color.BLUE: "#000080", - Color.CYAN: "#00cdcd", - Color.GREEN: "#00cd00", - Color.INTENSE_BLACK: "#666699", - Color.INTENSE_BLUE: "#3399cc", - Color.INTENSE_CYAN: "#00cdcd", - Color.INTENSE_GREEN: "#00cd00", - Color.INTENSE_PURPLE: "#cd00cd", - Color.INTENSE_RED: "#FF0000", - Color.INTENSE_WHITE: "#cccccc", - Color.INTENSE_YELLOW: "#cdcd00", - Color.NO_COLOR: "noinherit", - Color.PURPLE: "#cd00cd", - Color.RED: "#cd0000", - Color.WHITE: "#cccccc", - Color.YELLOW: "#cd0000", - } - _expand_style(style) - return style - - -def _vs_style(): - style = { - Color.BLACK: "#008000", - Color.BLUE: "#0000ff", - Color.CYAN: "#2b91af", - Color.GREEN: "#008000", - Color.INTENSE_BLACK: "#2b91af", - Color.INTENSE_BLUE: "#2b91af", - Color.INTENSE_CYAN: "#2b91af", - Color.INTENSE_GREEN: "#2b91af", - Color.INTENSE_PURPLE: "#2b91af", - Color.INTENSE_RED: "#FF0000", - Color.INTENSE_WHITE: "#2b91af", - Color.INTENSE_YELLOW: "#2b91af", - Color.NO_COLOR: "noinherit", - Color.PURPLE: "#a31515", - Color.RED: "#a31515", - Color.WHITE: "#2b91af", - Color.YELLOW: "#a31515", - } - _expand_style(style) - return style - - -def _xcode_style(): - style = { - Color.BLACK: "#000000", - Color.BLUE: "#1C01CE", - Color.CYAN: "#3F6E75", - Color.GREEN: "#177500", - Color.INTENSE_BLACK: "#3F6E75", - Color.INTENSE_BLUE: "#2300CE", - Color.INTENSE_CYAN: "#3F6E75", - Color.INTENSE_GREEN: "#3F6E75", - Color.INTENSE_PURPLE: "#A90D91", - Color.INTENSE_RED: "#C41A16", - Color.INTENSE_WHITE: "#3F6E75", - Color.INTENSE_YELLOW: "#836C28", - Color.NO_COLOR: "noinherit", - Color.PURPLE: "#A90D91", - Color.RED: "#C41A16", - Color.WHITE: "#3F6E75", - Color.YELLOW: "#836C28", - } - _expand_style(style) - return style - - -STYLES = LazyDict( - { - "algol": _algol_style, - "algol_nu": _algol_nu_style, - "autumn": _autumn_style, - "borland": _borland_style, - "bw": _bw_style, - "colorful": _colorful_style, - "default": _default_style, - "emacs": _emacs_style, - "friendly": _friendly_style, - "fruity": _fruity_style, - "igor": _igor_style, - "lovelace": _lovelace_style, - "manni": _manni_style, - "monokai": _monokai_style, - "murphy": _murphy_style, - "native": _native_style, - "paraiso-dark": _paraiso_dark_style, - "paraiso-light": _paraiso_light_style, - "pastie": _pastie_style, - "perldoc": _perldoc_style, - "rrt": _rrt_style, - "tango": _tango_style, - "trac": _trac_style, - "vim": _vim_style, - "vs": _vs_style, - "xcode": _xcode_style, - }, - globals(), - "STYLES", -) - -del ( - _algol_style, - _algol_nu_style, - _autumn_style, - _borland_style, - _bw_style, - _colorful_style, - _default_style, - _emacs_style, - _friendly_style, - _fruity_style, - _igor_style, - _lovelace_style, - _manni_style, - _monokai_style, - _murphy_style, - _native_style, - _paraiso_dark_style, - _paraiso_light_style, - _pastie_style, - _perldoc_style, - _rrt_style, - _tango_style, - _trac_style, - _vim_style, - _vs_style, - _xcode_style, -) - - -# dynamic styles -def make_pygments_style(palette): - """Makes a pygments style based on a color palette.""" - global Color - style = {getattr(Color, 
"NO_COLOR"): "noinherit"} - for name, t in BASE_XONSH_COLORS.items(): - color = find_closest_color(t, palette) - style[getattr(Color, name)] = "#" + color - style[getattr(Color, "BOLD_" + name)] = "bold #" + color - style[getattr(Color, "UNDERLINE_" + name)] = "underline #" + color - style[getattr(Color, "BOLD_UNDERLINE_" + name)] = "bold underline #" + color - style[getattr(Color, "BACKGROUND_" + name)] = "bg:#" + color - return style - - -def pygments_style_by_name(name): - """Gets or makes a pygments color style by its name.""" - if name in STYLES: - return STYLES[name] - pstyle = get_style_by_name(name) - palette = make_palette(pstyle.styles.values()) - astyle = make_pygments_style(palette) - STYLES[name] = astyle - return astyle - - -def _monkey_patch_pygments_codes(): - """ Monky patch pygments' dict of console codes, - with new color names - """ - import pygments.console - - if "brightblack" in pygments.console.codes: - # Assume that colors are already fixed in pygments - # for example when using pygments from source - return - - if not getattr(pygments.console, "_xonsh_patched", False): - patched_codes = {} - for new, old in PTK_NEW_OLD_COLOR_MAP.items(): - if old in pygments.console.codes: - patched_codes[new[1:]] = pygments.console.codes[old] - pygments.console.codes.update(patched_codes) - pygments.console._xonsh_patched = True - - -# -# Formatter -# - - -@lazyobject -def XonshTerminal256Formatter(): - - if ( - ptk_version_info() - and ptk_version_info() > (2, 0) - and pygments_version_info() - and (2, 2, 0) <= pygments_version_info() < (2, 4, 0) - ): - # Monky patch pygments' dict of console codes - # with the new color names used by PTK2 - # Can be removed once pygment names get fixed. - _monkey_patch_pygments_codes() - - class XonshTerminal256FormatterProxy(terminal256.Terminal256Formatter): - """Proxy class for xonsh terminal256 formatting that understands. - xonsh color tokens. - """ - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - # just keep the opening token for colors. - color_names = set(map(str, Color.subtypes)) - for name, (opener, closer) in self.style_string.items(): - if name in color_names: - self.style_string[name] = (opener, "") - # special case NO_COLOR, because it is special. - self.style_string["Token.Color.NO_COLOR"] = ("\x1b[39m", "") - - return XonshTerminal256FormatterProxy + 'root': [(r'^(>>>|\.\.\.) ', Generic.Prompt), + (r'\n(>>>|\.\.\.)', Generic.Prompt), + (r'\n(?![>.][>.][>.] )([^\n]*)', Generic.Output), + (r'\n(?![>.][>.][>.] )(.*?)$', Generic.Output)] + ROOT_TOKENS, + 'pymode': PYMODE_TOKENS, + 'subproc': SUBPROC_TOKENS, + } + +# XonshLexer & XonshSubprocLexer have to refernce each other +XonshSubprocLexer.tokens['root'] = [ + (r'(\$\{)(.*)(\})', bygroups(Keyword, using(XonshLexer), Keyword)), + (r'(@\()(.+)(\))', bygroups(Keyword, using(XonshLexer), Keyword)), +] + XonshSubprocLexer.tokens['root'] diff --git a/xonsh/pygments_cache.py b/xonsh/pygments_cache.py deleted file mode 100644 index 8fcbaed..0000000 --- a/xonsh/pygments_cache.py +++ /dev/null @@ -1,455 +0,0 @@ -"""A fast, drop-in replacement for pygments ``get_*()`` and ``guess_*()`` funtions. 
- -The following pygments API functions are currently supplied here:: - - from pygments_cache import get_lexer_for_filename, guess_lexer_for_filename - from pygments_cache import get_formatter_for_filename, get_formatter_by_name - from pygments_cache import get_style_by_name, get_all_styles - from pygments_cache import get_filter_by_name - -The cache itself is stored at the location given by the ``$PYGMENTS_CACHE_FILE`` -environment variable, or by default at ``~/.local/share/pygments-cache/cache.py``. -The cache file is created on first use, if it does not already exist. - - -""" -import os -import importlib - - -# Global storage variables -__version__ = "0.1.1" -CACHE = None -DEBUG = False - - -def _print_duplicate_message(duplicates): - import sys - - for filename, vals in sorted(duplicates.items()): - msg = "for {0} ambiquity between:\n ".format(filename) - vals = [m + ":" + c for m, c in vals] - msg += "\n ".join(sorted(vals)) - print(msg, file=sys.stderr) - - -def _discover_lexers(): - import inspect - from pygments.lexers import get_all_lexers, find_lexer_class - - # maps file extension (and names) to (module, classname) tuples - default_exts = { - # C / C++ - ".h": ("pygments.lexers.c_cpp", "CLexer"), - ".hh": ("pygments.lexers.c_cpp", "CppLexer"), - ".cp": ("pygments.lexers.c_cpp", "CppLexer"), - # python - ".py": ("pygments.lexers.python", "Python3Lexer"), - ".pyw": ("pygments.lexers.python", "Python3Lexer"), - ".sc": ("pygments.lexers.python", "Python3Lexer"), - ".tac": ("pygments.lexers.python", "Python3Lexer"), - "SConstruct": ("pygments.lexers.python", "Python3Lexer"), - "SConscript": ("pygments.lexers.python", "Python3Lexer"), - ".sage": ("pygments.lexers.python", "Python3Lexer"), - ".pytb": ("pygments.lexers.python", "Python3TracebackLexer"), - # perl - ".t": ("pygments.lexers.perl", "Perl6Lexer"), - ".pl": ("pygments.lexers.perl", "Perl6Lexer"), - ".pm": ("pygments.lexers.perl", "Perl6Lexer"), - # asm - ".s": ("pygments.lexers.asm", "GasLexer"), - ".S": ("pygments.lexers.asm", "GasLexer"), - ".asm": ("pygments.lexers.asm", "NasmLexer"), - ".ASM": ("pygments.lexers.asm", "NasmLexer"), - # Antlr - ".g": ("pygments.lexers.parsers", "AntlrCppLexer"), - ".G": ("pygments.lexers.parsers", "AntlrCppLexer"), - # XML - ".xml": ("pygments.lexers.html", "XmlLexer"), - ".xsl": ("pygments.lexers.html", "XsltLexer"), - ".xslt": ("pygments.lexers.html", "XsltLexer"), - # ASP - ".axd": ("pygments.lexers.dotnet", "CSharpAspxLexer"), - ".asax": ("pygments.lexers.dotnet", "CSharpAspxLexer"), - ".ascx": ("pygments.lexers.dotnet", "CSharpAspxLexer"), - ".ashx": ("pygments.lexers.dotnet", "CSharpAspxLexer"), - ".asmx": ("pygments.lexers.dotnet", "CSharpAspxLexer"), - ".aspx": ("pygments.lexers.dotnet", "CSharpAspxLexer"), - # misc - ".b": ("pygments.lexers.esoteric", "BrainfuckLexer"), - ".j": ("pygments.lexers.jvm", "JasminLexer"), - ".m": ("pygments.lexers.matlab", "MatlabLexer"), - ".n": ("pygments.lexers.dotnet", "NemerleLexer"), - ".p": ("pygments.lexers.pawn", "PawnLexer"), - ".v": ("pygments.lexers.theorem", "CoqLexer"), - ".as": ("pygments.lexers.actionscript", "ActionScript3Lexer"), - ".fs": ("pygments.lexers.forth", "ForthLexer"), - ".hy": ("pygments.lexers.lisp", "HyLexer"), - ".ts": ("pygments.lexers.javascript", "TypeScriptLexer"), - ".rl": ("pygments.lexers.parsers", "RagelCppLexer"), - ".bas": ("pygments.lexers.basic", "QBasicLexer"), - ".bug": ("pygments.lexers.modeling", "BugsLexer"), - ".ecl": ("pygments.lexers.ecl", "ECLLexer"), - ".inc": ("pygments.lexers.php", "PhpLexer"), - 
".inf": ("pygments.lexers.configs", "IniLexer"), - ".pro": ("pygments.lexers.prolog", "PrologLexer"), - ".sql": ("pygments.lexers.sql", "SqlLexer"), - ".txt": ("pygments.lexers.special", "TextLexer"), - ".html": ("pygments.lexers.html", "HtmlLexer"), - } - exts = {} - lexers = {"exts": exts} - if DEBUG: - from collections import defaultdict - - duplicates = defaultdict(set) - for longname, aliases, filenames, mimetypes in get_all_lexers(): - cls = find_lexer_class(longname) - mod = inspect.getmodule(cls) - val = (mod.__name__, cls.__name__) - for filename in filenames: - if filename.startswith("*."): - filename = filename[1:] - if "*" in filename: - continue - if ( - DEBUG - and filename in exts - and exts[filename] != val - and filename not in default_exts - ): - duplicates[filename].add(val) - duplicates[filename].add(exts[filename]) - exts[filename] = val - # remove some ambiquity - exts.update(default_exts) - # print duplicate message - if DEBUG: - _print_duplicate_message(duplicates) - return lexers - - -def _discover_formatters(): - import inspect - from pygments.formatters import get_all_formatters - - # maps file extension (and names) to (module, classname) tuples - default_exts = {} - exts = {} - # maps formatter 'name' (not the class name) and alias to (module, classname) tuples - default_names = {} - names = {} - formatters = {"exts": exts, "names": names} - if DEBUG: - from collections import defaultdict - - duplicates = defaultdict(set) - for cls in get_all_formatters(): - mod = inspect.getmodule(cls) - val = (mod.__name__, cls.__name__) - # add extentions - for filename in cls.filenames: - if filename.startswith("*."): - filename = filename[1:] - if "*" in filename: - continue - if ( - DEBUG - and filename in exts - and exts[filename] != val - and filename not in default_exts - ): - duplicates[filename].add(val) - duplicates[filename].add(exts[filename]) - exts[filename] = val - # add names and aliases - names[cls.name] = val - for alias in cls.aliases: - if ( - DEBUG - and alias in names - and names[alias] != val - and alias not in default_names - ): - duplicates[alias].add(val) - duplicates[alias].add(names[alias]) - names[alias] = val - # remove some ambiquity - exts.update(default_exts) - names.update(default_names) - # print dumplicate message - if DEBUG: - _print_duplicate_message(duplicates) - return formatters - - -def _discover_styles(): - import inspect - from pygments.styles import get_all_styles, get_style_by_name - - # maps style 'name' (not the class name) and aliases to (module, classname) tuples - default_names = {} - names = {} - styles = {"names": names} - if DEBUG: - from collections import defaultdict - - duplicates = defaultdict(set) - for name in get_all_styles(): - cls = get_style_by_name(name) - mod = inspect.getmodule(cls) - val = (mod.__name__, cls.__name__) - if DEBUG and name in names and names[name] != val and name not in default_names: - duplicates[name].add(val) - duplicates[name].add(names[name]) - names[name] = val - # remove some ambiquity - names.update(default_names) - # print dumplicate message - if DEBUG: - _print_duplicate_message(duplicates) - return styles - - -def _discover_filters(): - import inspect - from pygments.filters import get_all_filters, get_filter_by_name - - # maps filter 'name' (not the class name) to (module, classname) tuples - default_names = {} - names = {} - filters = {"names": names} - if DEBUG: - from collections import defaultdict - - duplicates = defaultdict(set) - for name in get_all_filters(): - filter = 
get_filter_by_name(name) - cls = type(filter) - mod = inspect.getmodule(cls) - val = (mod.__name__, cls.__name__) - if DEBUG and name in names and names[name] != val and name not in default_names: - duplicates[name].add(val) - duplicates[name].add(names[name]) - names[name] = val - # remove some ambiquity - names.update(default_names) - # print dumplicate message - if DEBUG: - _print_duplicate_message(duplicates) - return filters - - -def build_cache(): - """Does the hard work of building a cache from nothing.""" - cache = {} - cache["lexers"] = _discover_lexers() - cache["formatters"] = _discover_formatters() - cache["styles"] = _discover_styles() - cache["filters"] = _discover_filters() - return cache - - -def cache_filename(): - """Gets the name of the cache file to use.""" - # Configuration variables read from the environment - if "PYGMENTS_CACHE_FILE" in os.environ: - return os.environ["PYGMENTS_CACHE_FILE"] - else: - return os.path.join( - os.environ.get( - "XDG_DATA_HOME", - os.path.join(os.path.expanduser("~"), ".local", "share"), - ), - "pygments-cache", - "cache.py", - ) - - -def load(filename): - """Loads the cache from a filename.""" - global CACHE - with open(filename) as f: - s = f.read() - ctx = globals() - CACHE = eval(s, ctx, ctx) - return CACHE - - -def write_cache(filename): - """Writes the current cache to the file""" - from pprint import pformat - - d = os.path.dirname(filename) - os.makedirs(d, exist_ok=True) - s = pformat(CACHE) - with open(filename, "w") as f: - f.write(s) - - -def load_or_build(): - """Loads the cache from disk. If the cache does not exist, - this will build and write it out. - """ - global CACHE - fname = cache_filename() - if os.path.exists(fname): - load(fname) - else: - import sys - - print("pygments cache not found, building...", file=sys.stderr) - CACHE = build_cache() - print("...writing cache to " + fname, file=sys.stderr) - write_cache(fname) - - -# -# pygments interface -# - - -def get_lexer_for_filename(filename, text="", **options): - """Gets a lexer from a filename (usually via the filename extension). - This mimics the behavior of ``pygments.lexers.get_lexer_for_filename()`` - and ``pygments.lexers.guess_lexer_for_filename()``. - """ - if CACHE is None: - load_or_build() - exts = CACHE["lexers"]["exts"] - fname = os.path.basename(filename) - key = fname if fname in exts else os.path.splitext(fname)[1] - if key in exts: - modname, clsname = exts[key] - mod = importlib.import_module(modname) - cls = getattr(mod, clsname) - lexer = cls(**options) - else: - # couldn't find lexer in cache, fallback to the hard way - import inspect - from pygments.lexers import guess_lexer_for_filename - - lexer = guess_lexer_for_filename(filename, text, **options) - # add this filename to the cache for future use - cls = type(lexer) - mod = inspect.getmodule(cls) - exts[fname] = (mod.__name__, cls.__name__) - write_cache(cache_filename()) - return lexer - - -guess_lexer_for_filename = get_lexer_for_filename - - -def get_formatter_for_filename(fn, **options): - """Gets a formatter instance from a filename (usually via the filename - extension). This mimics the behavior of - ``pygments.formatters.get_formatter_for_filename()``. 
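Each of the get_* functions in this module repeats the same cache-then-fallback shape: resolve a (module, classname) pair from the cache, or fall back to pygments' slow registry scan and memoize the answer. A condensed, hypothetical restatement of that shape (not the real API):

    import importlib

    def cached_lookup(key, table, slow_path, **options):
        # table maps key -> (module name, class name); slow_path is the
        # expensive pygments scan performed only on a cache miss.
        if key in table:
            modname, clsname = table[key]
            return getattr(importlib.import_module(modname), clsname)(**options)
        obj = slow_path(key, **options)
        cls = type(obj)
        table[key] = (cls.__module__, cls.__name__)   # memoize for next time
        return obj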
- """ - if CACHE is None: - load_or_build() - exts = CACHE["formatters"]["exts"] - fname = os.path.basename(fn) - key = fname if fname in exts else os.path.splitext(fname)[1] - if key in exts: - modname, clsname = exts[key] - mod = importlib.import_module(modname) - cls = getattr(mod, clsname) - formatter = cls(**options) - else: - # couldn't find formatter in cache, fallback to the hard way - import inspect - from pygments.formatters import get_formatter_for_filename - - formatter = get_formatter_for_filename(fn, **options) - # add this filename to the cache for future use - cls = type(formatter) - mod = inspect.getmodule(cls) - exts[fname] = (mod.__name__, cls.__name__) - write_cache(cache_filename()) - return formatter - - -def get_formatter_by_name(alias, **options): - """Gets a formatter instance from its name or alias. - This mimics the behavior of ``pygments.formatters.get_formatter_by_name()``. - """ - if CACHE is None: - load_or_build() - names = CACHE["formatters"]["names"] - if alias in names: - modname, clsname = names[alias] - mod = importlib.import_module(modname) - cls = getattr(mod, clsname) - formatter = cls(**options) - else: - # couldn't find formatter in cache, fallback to the hard way - import inspect - from pygments.formatters import get_formatter_by_name - - formatter = get_formatter_by_name(alias, **options) - # add this filename to the cache for future use - cls = type(formatter) - mod = inspect.getmodule(cls) - names[alias] = (mod.__name__, cls.__name__) - write_cache(cache_filename()) - return formatter - - -def get_style_by_name(name): - """Gets a style class from its name or alias. - This mimics the behavior of ``pygments.styles.get_style_by_name()``. - """ - if CACHE is None: - load_or_build() - names = CACHE["styles"]["names"] - if name in names: - modname, clsname = names[name] - mod = importlib.import_module(modname) - style = getattr(mod, clsname) - else: - # couldn't find style in cache, fallback to the hard way - import inspect - from pygments.styles import get_style_by_name - - style = get_style_by_name(name) - # add this style to the cache for future use - mod = inspect.getmodule(style) - names[name] = (mod.__name__, style.__name__) - write_cache(cache_filename()) - return style - - -def get_all_styles(): - """Iterable through all known style names. - This mimics the behavior of ``pygments.styles.get_all_styles``. - """ - if CACHE is None: - load_or_build() - yield from CACHE["styles"]["names"] - - -def get_filter_by_name(filtername, **options): - """Gets a filter instance from its name. This mimics the behavior of - ``pygments.filters.get_filtere_by_name()``. 
- """ - if CACHE is None: - load_or_build() - names = CACHE["filters"]["names"] - if filtername in names: - modname, clsname = names[filtername] - mod = importlib.import_module(modname) - cls = getattr(mod, clsname) - filter = cls(**options) - else: - # couldn't find style in cache, fallback to the hard way - import inspect - from pygments.filters import get_filter_by_name - - filter = get_filter_by_name(filtername, **options) - # add this filter to the cache for future use - cls = type(filter) - mod = inspect.getmodule(cls) - names[filtername] = (mod.__name__, cls.__name__) - write_cache(cache_filename()) - return filter diff --git a/xonsh/pytest_plugin.py b/xonsh/pytest_plugin.py deleted file mode 100644 index af7b3c4..0000000 --- a/xonsh/pytest_plugin.py +++ /dev/null @@ -1,68 +0,0 @@ -# -*- coding: utf-8 -*- -"""Pytest plugin for testing xsh files.""" -import sys -import importlib -from traceback import format_list, extract_tb - -import pytest - -from xonsh.main import setup - - -def pytest_configure(config): - setup() - - -def pytest_collection_modifyitems(items): - items.sort(key=lambda x: 0 if isinstance(x, XshFunction) else 1) - - -def _limited_traceback(excinfo): - """ Return a formatted traceback with all the stack - from this frame (i.e __file__) up removed - """ - tb = extract_tb(excinfo.tb) - try: - idx = [__file__ in e for e in tb].index(True) - return format_list(tb[idx + 1 :]) - except ValueError: - return format_list(tb) - - -def pytest_collect_file(parent, path): - if path.ext.lower() == ".xsh" and path.basename.startswith("test_"): - return XshFile(path, parent) - - -class XshFile(pytest.File): - def collect(self): - sys.path.append(self.fspath.dirname) - mod = importlib.import_module(self.fspath.purebasename) - sys.path.pop(0) - tests = [t for t in dir(mod) if t.startswith("test_")] - for test_name in tests: - obj = getattr(mod, test_name) - if hasattr(obj, "__call__"): - yield XshFunction( - name=test_name, parent=self, test_func=obj, test_module=mod - ) - - -class XshFunction(pytest.Item): - def __init__(self, name, parent, test_func, test_module): - super().__init__(name, parent) - self._test_func = test_func - self._test_module = test_module - - def runtest(self, *args, **kwargs): - self._test_func(*args, **kwargs) - - def repr_failure(self, excinfo): - """ called when self.runtest() raises an exception. """ - formatted_tb = _limited_traceback(excinfo) - formatted_tb.insert(0, "xonsh execution failed\n") - formatted_tb.append("{}: {}".format(excinfo.type.__name__, excinfo.value)) - return "".join(formatted_tb) - - def reportinfo(self): - return self.fspath, 0, "xonsh test: {}".format(self.name) diff --git a/xonsh/readline_shell.py b/xonsh/readline_shell.py index 238789f..9755707 100644 --- a/xonsh/readline_shell.py +++ b/xonsh/readline_shell.py @@ -1,168 +1,60 @@ -# -*- coding: utf-8 -*- -"""The readline based xonsh shell. - -Portions of this code related to initializing the readline library -are included from the IPython project. 
The IPython project is: - -* Copyright (c) 2008-2014, IPython Development Team -* Copyright (c) 2001-2007, Fernando Perez -* Copyright (c) 2001, Janko Hauser -* Copyright (c) 2001, Nathaniel Gray - -""" +"""The readline based xonsh shell""" import os import sys -import cmd +import time import select -import shutil import builtins -import importlib -import threading -import subprocess -import collections +from cmd import Cmd +from warnings import warn +from threading import Thread, Lock +from collections import deque -from xonsh.lazyasd import LazyObject, lazyobject +from xonsh import lazyjson from xonsh.base_shell import BaseShell -from xonsh.ansi_colors import ( - ansi_partial_color_format, - ansi_color_style_names, - ansi_color_style, -) -from xonsh.prompt.base import multiline_prompt -from xonsh.tools import ( - print_exception, - check_for_partial_string, - to_bool, - columnize, - carriage_return, -) -from xonsh.platform import ( - ON_WINDOWS, - ON_CYGWIN, - ON_MSYS, - ON_DARWIN, - ON_POSIX, - os_environ, -) -from xonsh.lazyimps import pygments, pyghooks, winutils -from xonsh.events import events +from xonsh.tools import ON_WINDOWS -readline = None -RL_COMPLETION_SUPPRESS_APPEND = RL_LIB = RL_STATE = None -RL_COMPLETION_QUERY_ITEMS = None +RL_COMPLETION_SUPPRESS_APPEND = RL_LIB = None RL_CAN_RESIZE = False RL_DONE = None -RL_VARIABLE_VALUE = None -_RL_STATE_DONE = 0x1000000 -_RL_STATE_ISEARCH = 0x0000080 - -_RL_PREV_CASE_SENSITIVE_COMPLETIONS = "to-be-set" def setup_readline(): - """Sets up the readline module and completion suppression, if available.""" - global RL_COMPLETION_SUPPRESS_APPEND, RL_LIB, RL_CAN_RESIZE, RL_STATE, readline, RL_COMPLETION_QUERY_ITEMS + """Sets up the readline module and completion supression, if available.""" + global RL_COMPLETION_SUPPRESS_APPEND, RL_LIB, RL_CAN_RESIZE if RL_COMPLETION_SUPPRESS_APPEND is not None: return - for _rlmod_name in ("gnureadline", "readline"): - try: - readline = importlib.import_module(_rlmod_name) - sys.modules["readline"] = readline - except ImportError: - pass - else: - break - - if readline is None: - print( - """Skipping setup. Because no `readline` implementation available. - Please install a backend (`readline`, `prompt-toolkit`, etc) to use - `xonsh` interactively. 
- See https://github.com/xonsh/xonsh/issues/1170""" - ) + try: + import readline + except ImportError: return - import ctypes import ctypes.util - - uses_libedit = readline.__doc__ and "libedit" in readline.__doc__ - readline.set_completer_delims(" \t\n") - # Cygwin seems to hang indefinitely when querying the readline lib - if (not ON_CYGWIN) and (not ON_MSYS) and (not readline.__file__.endswith(".py")): + readline.set_completer_delims(' \t\n') + if not readline.__file__.endswith('.py'): RL_LIB = lib = ctypes.cdll.LoadLibrary(readline.__file__) try: RL_COMPLETION_SUPPRESS_APPEND = ctypes.c_int.in_dll( - lib, "rl_completion_suppress_append" - ) + lib, 'rl_completion_suppress_append') except ValueError: # not all versions of readline have this symbol, ie Macs sometimes RL_COMPLETION_SUPPRESS_APPEND = None - try: - RL_COMPLETION_QUERY_ITEMS = ctypes.c_int.in_dll( - lib, "rl_completion_query_items" - ) - except ValueError: - # not all versions of readline have this symbol, ie Macs sometimes - RL_COMPLETION_QUERY_ITEMS = None - try: - RL_STATE = ctypes.c_int.in_dll(lib, "rl_readline_state") - except Exception: - pass - RL_CAN_RESIZE = hasattr(lib, "rl_reset_screen_size") - env = builtins.__xonsh__.env + RL_CAN_RESIZE = hasattr(lib, 'rl_reset_screen_size') + env = builtins.__xonsh_env__ # reads in history readline.set_history_length(-1) ReadlineHistoryAdder() # sets up IPython-like history matching with up and down - readline.parse_and_bind('"\\e[B": history-search-forward') - readline.parse_and_bind('"\\e[A": history-search-backward') + readline.parse_and_bind('"\e[B": history-search-forward') + readline.parse_and_bind('"\e[A": history-search-backward') # Setup Shift-Tab to indent - readline.parse_and_bind('"\\e[Z": "{0}"'.format(env.get("INDENT"))) + readline.parse_and_bind('"\e[Z": "{0}"'.format(env.get('INDENT'))) # handle tab completion differences found in libedit readline compatibility # as discussed at http://stackoverflow.com/a/7116997 - if uses_libedit and ON_DARWIN: + if readline.__doc__ and 'libedit' in readline.__doc__: readline.parse_and_bind("bind ^I rl_complete") - print( - "\n".join( - [ - "", - "*" * 78, - "libedit detected - readline will not be well behaved, including but not limited to:", - " * crashes on tab completion", - " * incorrect history navigation", - " * corrupting long-lines", - " * failure to wrap or indent lines properly", - "", - "It is highly recommended that you install gnureadline, which is installable with:", - " xpip install gnureadline", - "*" * 78, - ] - ), - file=sys.stderr, - ) else: readline.parse_and_bind("tab: complete") - # try to load custom user settings - inputrc_name = os_environ.get("INPUTRC") - if inputrc_name is None: - if uses_libedit: - inputrc_name = ".editrc" - else: - inputrc_name = ".inputrc" - inputrc_name = os.path.join(os.path.expanduser("~"), inputrc_name) - if (not ON_WINDOWS) and (not os.path.isfile(inputrc_name)): - inputrc_name = "/etc/inputrc" - if ON_WINDOWS: - winutils.enable_virtual_terminal_processing() - if os.path.isfile(inputrc_name): - try: - readline.read_init_file(inputrc_name) - except Exception: - # this seems to fail with libedit - print_exception("xonsh: could not load readline default init file.") - # properly reset input typed before the first prompt - readline.set_startup_hook(carriage_return) def teardown_readline(): @@ -173,45 +65,8 @@ def teardown_readline(): return -def _rebind_case_sensitive_completions(): - # handle case sensitive, see Github issue #1342 for details - global 
_RL_PREV_CASE_SENSITIVE_COMPLETIONS - env = builtins.__xonsh__.env - case_sensitive = env.get("CASE_SENSITIVE_COMPLETIONS") - if case_sensitive is _RL_PREV_CASE_SENSITIVE_COMPLETIONS: - return - if case_sensitive: - readline.parse_and_bind("set completion-ignore-case off") - else: - readline.parse_and_bind("set completion-ignore-case on") - _RL_PREV_CASE_SENSITIVE_COMPLETIONS = case_sensitive - - -def fix_readline_state_after_ctrl_c(): - """ - Fix to allow Ctrl-C to exit reverse-i-search. - - Based on code from: - http://bugs.python.org/file39467/raw_input__workaround_demo.py - """ - if ON_WINDOWS: - # hack to make pyreadline mimic the desired behavior - try: - _q = readline.rl.mode.process_keyevent_queue - if len(_q) > 1: - _q.pop() - except Exception: - pass - if RL_STATE is None: - return - if RL_STATE.value & _RL_STATE_ISEARCH: - RL_STATE.value &= ~_RL_STATE_ISEARCH - if not RL_STATE.value & _RL_STATE_DONE: - RL_STATE.value |= _RL_STATE_DONE - - def rl_completion_suppress_append(val=1): - """Sets the rl_completion_suppress_append variable, if possible. + """Sets the rl_completion_suppress_append varaiable, if possible. A value of 1 (default) means to suppress, a value of 0 means to enable. """ if RL_COMPLETION_SUPPRESS_APPEND is None: @@ -219,243 +74,55 @@ def rl_completion_suppress_append(val=1): RL_COMPLETION_SUPPRESS_APPEND.value = val -def rl_completion_query_items(val=None): - """Sets the rl_completion_query_items variable, if possible. - A None value will set this to $COMPLETION_QUERY_LIMIT, otherwise any integer - is accepted. - """ - if RL_COMPLETION_QUERY_ITEMS is None: - return - if val is None: - val = builtins.__xonsh__.env.get("COMPLETION_QUERY_LIMIT") - RL_COMPLETION_QUERY_ITEMS.value = val - - -def rl_variable_dumper(readable=True): - """Dumps the currently set readline variables. If readable is True, then this - output may be used in an inputrc file. 
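Most of the low-level readline pokes in this file use the same ctypes idiom: bind a Python c_int to a global variable inside the already-loaded C library, then read or write its .value. A guarded sketch of that idiom, mirroring what setup_readline() does rather than introducing any new API:

    import ctypes
    import readline

    if not readline.__file__.endswith(".py"):     # pure-Python readline has no C lib
        lib = ctypes.cdll.LoadLibrary(readline.__file__)
        try:
            suppress = ctypes.c_int.in_dll(lib, "rl_completion_suppress_append")
            suppress.value = 1                    # writes straight into the C global
        except ValueError:
            suppress = None                       # symbol missing on some builds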
- """ - RL_LIB.rl_variable_dumper(int(readable)) - - -def rl_variable_value(variable): - """Returns the currently set value for a readline configuration variable.""" - global RL_VARIABLE_VALUE - if RL_VARIABLE_VALUE is None: - import ctypes - - RL_VARIABLE_VALUE = RL_LIB.rl_variable_value - RL_VARIABLE_VALUE.restype = ctypes.c_char_p - env = builtins.__xonsh__.env - enc, errors = env.get("XONSH_ENCODING"), env.get("XONSH_ENCODING_ERRORS") - if isinstance(variable, str): - variable = variable.encode(encoding=enc, errors=errors) - rtn = RL_VARIABLE_VALUE(variable) - return rtn.decode(encoding=enc, errors=errors) - - -@lazyobject -def rl_on_new_line(): - """Grabs one of a few possible redisplay functions in readline.""" - names = ["rl_on_new_line", "rl_forced_update_display", "rl_redisplay"] - for name in names: - func = getattr(RL_LIB, name, None) - if func is not None: - break - else: - - def print_for_newline(): - print() - - func = print_for_newline - return func - - def _insert_text_func(s, readline): """Creates a function to insert text via readline.""" - def inserter(): readline.insert_text(s) readline.redisplay() - return inserter -DEDENT_TOKENS = LazyObject( - lambda: frozenset(["raise", "return", "pass", "break", "continue"]), - globals(), - "DEDENT_TOKENS", -) +DEDENT_TOKENS = frozenset(['raise', 'return', 'pass', 'break', 'continue']) -class ReadlineShell(BaseShell, cmd.Cmd): +class ReadlineShell(BaseShell, Cmd): """The readline based xonsh shell.""" - def __init__(self, completekey="tab", stdin=None, stdout=None, **kwargs): - super().__init__(completekey=completekey, stdin=stdin, stdout=stdout, **kwargs) + def __init__(self, completekey='tab', stdin=None, stdout=None, **kwargs): + super().__init__(completekey=completekey, + stdin=stdin, + stdout=stdout, + **kwargs) setup_readline() - self._current_indent = "" - self._current_prompt = "" - self._force_hide = None - self._complete_only_last_table = { - # Truth table for completions, keys are: - # (prefix_begs_quote, prefix_ends_quote, i_ends_quote, - # last_starts_with_prefix, i_has_space) - (True, True, True, True, True): True, - (True, True, True, True, False): True, - (True, True, True, False, True): False, - (True, True, True, False, False): True, - (True, True, False, True, True): False, - (True, True, False, True, False): False, - (True, True, False, False, True): False, - (True, True, False, False, False): False, - (True, False, True, True, True): True, - (True, False, True, True, False): False, - (True, False, True, False, True): False, - (True, False, True, False, False): True, - (True, False, False, True, True): False, - (True, False, False, True, False): False, - (True, False, False, False, True): False, - (True, False, False, False, False): False, - (False, True, True, True, True): True, - (False, True, True, True, False): True, - (False, True, True, False, True): True, - (False, True, True, False, False): True, - (False, True, False, True, True): False, - (False, True, False, True, False): False, - (False, True, False, False, True): False, - (False, True, False, False, False): False, - (False, False, True, True, True): False, - (False, False, True, True, False): False, - (False, False, True, False, True): False, - (False, False, True, False, False): True, - (False, False, False, True, True): True, - (False, False, False, True, False): False, - (False, False, False, False, True): False, - (False, False, False, False, False): False, - } - self.cmdqueue = collections.deque() + self._current_indent = '' + self.cmdqueue = 
deque() def __del__(self): teardown_readline() - def singleline(self, store_in_history=True, **kwargs): - """Reads a single line of input. The store_in_history kwarg - flags whether the input should be stored in readline's in-memory - history. - """ - if not store_in_history: # store current position to remove it later - try: - import readline - except ImportError: - store_in_history = True - pos = readline.get_current_history_length() - 1 - events.on_pre_prompt.fire() - rtn = input(self.prompt) - events.on_post_prompt.fire() - if not store_in_history and pos >= 0: - readline.remove_history_item(pos) - return rtn - def parseline(self, line): """Overridden to no-op.""" - return "", line, line + return '', line, line - def _querycompletions(self, completions, loc): - """Returns whether or not we should show completions. 0 means that prefixes - should not be shown, 1 means that there is a common prefix among all completions - and they should be shown, while 2 means that there is no common prefix but - we are under the query limit and they should be shown. - """ - if os.path.commonprefix([c[loc:] for c in completions]): - return 1 - elif len(completions) <= builtins.__xonsh__.env.get("COMPLETION_QUERY_LIMIT"): - return 2 - msg = "\nDisplay all {} possibilities? ".format(len(completions)) - msg += "({GREEN}y{NO_COLOR} or {RED}n{NO_COLOR})" - self.print_color(msg, end="", flush=True, file=sys.stderr) - yn = "x" - while yn not in "yn": - yn = sys.stdin.read(1) - show_completions = to_bool(yn) - print() - if not show_completions: - rl_on_new_line() - return 0 - w, h = shutil.get_terminal_size() - lines = columnize(completions, width=w) - more_msg = self.format_color( - "{YELLOW}==={NO_COLOR} more or " - "{PURPLE}({NO_COLOR}q{PURPLE}){NO_COLOR}uit " - "{YELLOW}==={NO_COLOR}" - ) - while len(lines) > h - 1: - print("".join(lines[: h - 1]), end="", flush=True, file=sys.stderr) - lines = lines[h - 1 :] - print(more_msg, end="", flush=True, file=sys.stderr) - q = sys.stdin.read(1).lower() - print(flush=True, file=sys.stderr) - if q == "q": - rl_on_new_line() - return 0 - print("".join(lines), end="", flush=True, file=sys.stderr) - rl_on_new_line() - return 0 - - def completedefault(self, prefix, line, begidx, endidx): + def completedefault(self, text, line, begidx, endidx): """Implements tab-completion for text.""" - if self.completer is None: - return [] rl_completion_suppress_append() # this needs to be called each time - _rebind_case_sensitive_completions() - rl_completion_query_items(val=999999999) - completions, l = self.completer.complete( - prefix, line, begidx, endidx, ctx=self.ctx - ) - chopped = prefix[:-l] - if chopped: - rtn_completions = [chopped + i for i in completions] - else: - rtn_completions = completions - rtn = [] - prefix_begs_quote = prefix.startswith("'") or prefix.startswith('"') - prefix_ends_quote = prefix.endswith("'") or prefix.endswith('"') - for i in rtn_completions: - i_ends_quote = i.endswith("'") or i.endswith('"') - last = i.rsplit(" ", 1)[-1] - last_starts_prefix = last.startswith(prefix) - i_has_space = " " in i - key = ( - prefix_begs_quote, - prefix_ends_quote, - i_ends_quote, - last_starts_prefix, - i_has_space, - ) - rtn.append(last if self._complete_only_last_table[key] else i) - # return based on show completions - show_completions = self._querycompletions(completions, endidx - begidx) - if show_completions == 0: - return [] - elif show_completions == 1: - return rtn - elif show_completions == 2: - return completions - else: - raise ValueError("query 
completions flag not understood.") + return self.completer.complete(text, line, + begidx, endidx, + ctx=self.ctx) # tab complete on first index too completenames = completedefault def _load_remaining_input_into_queue(self): - buf = b"" + buf = b'' while True: r, w, x = select.select([self.stdin], [], [], 1e-6) if len(r) == 0: break buf += os.read(self.stdin.fileno(), 1024) if len(buf) > 0: - buf = buf.decode().replace("\r\n", "\n").replace("\r", "\n") + buf = buf.decode().replace('\r\n', '\n').replace('\r', '\n') self.cmdqueue.extend(buf.splitlines(keepends=True)) def postcmd(self, stop, line): @@ -469,19 +136,19 @@ def postcmd(self, stop, line): if self.need_more_lines: if len(line.strip()) == 0: readline.set_pre_input_hook(None) - self._current_indent = "" - elif line.rstrip()[-1] == ":": - ind = line[: len(line) - len(line.lstrip())] - ind += builtins.__xonsh__.env.get("INDENT") + self._current_indent = '' + elif line.rstrip()[-1] == ':': + ind = line[:len(line) - len(line.lstrip())] + ind += builtins.__xonsh_env__.get('INDENT') readline.set_pre_input_hook(_insert_text_func(ind, readline)) self._current_indent = ind elif line.split(maxsplit=1)[0] in DEDENT_TOKENS: - env = builtins.__xonsh__.env - ind = self._current_indent[: -len(env.get("INDENT"))] + env = builtins.__xonsh_env__ + ind = self._current_indent[:-len(env.get('INDENT'))] readline.set_pre_input_hook(_insert_text_func(ind, readline)) self._current_indent = ind else: - ind = line[: len(line) - len(line.lstrip())] + ind = line[:len(line) - len(line.lstrip())] if ind != self._current_indent: insert_func = _insert_text_func(ind, readline) readline.set_pre_input_hook(insert_func) @@ -502,7 +169,6 @@ def _cmdloop(self, intro=None): if self.use_rawinput and self.completekey: try: import readline - self.old_completer = readline.get_completer() readline.set_completer(self.complete) readline.parse_and_bind(self.completekey + ": complete") @@ -513,41 +179,38 @@ def _cmdloop(self, intro=None): if intro is not None: self.intro = intro if self.intro: - self.stdout.write(str(self.intro) + "\n") + self.stdout.write(str(self.intro)+"\n") stop = None while not stop: line = None exec_now = False if len(self.cmdqueue) > 0: line = self.cmdqueue.popleft() - exec_now = line.endswith("\n") + exec_now = line.endswith('\n') if self.use_rawinput and not exec_now: - inserter = ( - None if line is None else _insert_text_func(line, readline) - ) + inserter = None if line is None \ + else _insert_text_func(line, readline) if inserter is not None: readline.set_pre_input_hook(inserter) try: - line = self.singleline() + line = input(self.prompt) except EOFError: - if builtins.__xonsh__.env.get("IGNOREEOF"): - self.stdout.write('Use "exit" to leave the shell.' 
"\n") - line = "" - else: - line = "EOF" + line = 'EOF' if inserter is not None: readline.set_pre_input_hook(None) else: - self.print_color(self.prompt, file=self.stdout) + self.stdout.write(self.prompt.replace('\001', '') + .replace('\002', '')) + self.stdout.flush() if line is not None: os.write(self.stdin.fileno(), line.encode()) if not exec_now: line = self.stdin.readline() if len(line) == 0: - line = "EOF" + line = 'EOF' else: - line = line.rstrip("\r\n") - if have_readline and line != "EOF": + line = line.rstrip('\r\n') + if have_readline and line != 'EOF': readline.add_history(line) if not ON_WINDOWS: # select() is not fully functional on windows @@ -555,25 +218,21 @@ def _cmdloop(self, intro=None): line = self.precmd(line) stop = self.onecmd(line) stop = self.postcmd(stop, line) - if ON_WINDOWS: - winutils.enable_virtual_terminal_processing() self.postloop() finally: if self.use_rawinput and self.completekey: try: import readline - readline.set_completer(self.old_completer) except ImportError: pass def cmdloop(self, intro=None): - while not builtins.__xonsh__.exit: + while not builtins.__xonsh_exit__: try: self._cmdloop(intro=intro) - except (KeyboardInterrupt, SystemExit): + except KeyboardInterrupt: print() # Gives a newline - fix_readline_state_after_ctrl_c() self.reset_buffer() intro = None @@ -585,81 +244,14 @@ def prompt(self): # This is needed to support some system where line-wrapping doesn't # work. This is a bug in upstream Python, or possibly readline. RL_LIB.rl_reset_screen_size() - if self.need_more_lines: - if self.mlprompt is None: - try: - self.mlprompt = multiline_prompt(curr=self._current_prompt) - except Exception: # pylint: disable=broad-except - print_exception() - self.mlprompt = " " - return self.mlprompt - env = builtins.__xonsh__.env # pylint: disable=no-member - p = env.get("PROMPT") - try: - p = self.prompt_formatter(p) - except Exception: # pylint: disable=broad-except - print_exception() - hide = True if self._force_hide is None else self._force_hide - p = ansi_partial_color_format(p, style=env.get("XONSH_COLOR_STYLE"), hide=hide) - self._current_prompt = p - self.settitle() - return p - - def format_color(self, string, hide=False, force_string=False, **kwargs): - """Readline implementation of color formatting. This uses ANSI color - codes. - """ - hide = hide if self._force_hide is None else self._force_hide - style = builtins.__xonsh__.env.get("XONSH_COLOR_STYLE") - return ansi_partial_color_format(string, hide=hide, style=style) - - def print_color(self, string, hide=False, **kwargs): - if isinstance(string, str): - s = self.format_color(string, hide=hide) - else: - # assume this is a list of (Token, str) tuples and format it - env = builtins.__xonsh__.env - self.styler.style_name = env.get("XONSH_COLOR_STYLE") - style_proxy = pyghooks.xonsh_style_proxy(self.styler) - formatter = pyghooks.XonshTerminal256Formatter(style=style_proxy) - s = pygments.format(string, formatter).rstrip() - print(s, **kwargs) + return super().prompt - def color_style_names(self): - """Returns an iterable of all available style names.""" - return ansi_color_style_names() - def color_style(self): - """Returns the current color map.""" - style = style = builtins.__xonsh__.env.get("XONSH_COLOR_STYLE") - return ansi_color_style(style=style) +class ReadlineHistoryAdder(Thread): - def restore_tty_sanity(self): - """An interface for resetting the TTY stdin mode. This is highly - dependent on the shell backend. 
Also it is mostly optional since - it only affects ^Z backgrounding behaviour. - """ - if not ON_POSIX: - return - stty, _ = builtins.__xonsh__.commands_cache.lazyget("stty", None) - if stty is None: - return - # If available, we should just call the stty utility. This call should - # not throw even if stty fails. It should also be noted that subprocess - # calls, like the following, seem to be ineffective: - # subprocess.call([stty, 'sane'], shell=True) - # My guess is that this is because Popen does some crazy redirecting - # under the covers. This effectively hides the true TTY stdin handle - # from stty. To get around this we have to use the lower level - # os.system() function. - os.system(stty + " sane") - - -class ReadlineHistoryAdder(threading.Thread): def __init__(self, wait_for_gc=True, *args, **kwargs): - """Thread responsible for adding inputs from history to the - current readline instance. May wait for the history garbage - collector to finish. + """Thread responsible for adding inputs from history to the current readline + instance. May wait for the history garbage collector to finish. """ super(ReadlineHistoryAdder, self).__init__(*args, **kwargs) self.daemon = True @@ -671,17 +263,24 @@ def run(self): import readline except ImportError: return - hist = builtins.__xonsh__.history - if hist is None: - return + hist = builtins.__xonsh_history__ + while self.wait_for_gc and hist.gc.is_alive(): + time.sleep(0.011) # gc sleeps for 0.01 secs, sleep a beat longer + files = hist.gc.unlocked_files() i = 1 - for h in hist.all_items(): - line = h["inp"].rstrip() - if i == 1: - pass - elif line == readline.get_history_item(i - 1): + for _, _, f in files: + try: + lj = lazyjson.LazyJSON(f, reopen=False) + for cmd in lj['cmds']: + inp = cmd['inp'].splitlines() + for line in inp: + if line == 'EOF': + continue + readline.add_history(line) + if RL_LIB is not None: + RL_LIB.history_set_pos(i) + i += 1 + lj.close() + except (IOError, OSError): continue - readline.add_history(line) - if RL_LIB is not None: - RL_LIB.history_set_pos(i) - i += 1 + diff --git a/xonsh/replay.py b/xonsh/replay.py index 526a866..1c5b161 100644 --- a/xonsh/replay.py +++ b/xonsh/replay.py @@ -1,17 +1,15 @@ -# -*- coding: utf-8 -*- """Tools to replay xonsh history files.""" -import json import time import builtins -import collections.abc as cabc +from collections.abc import Mapping from xonsh.tools import swap -from xonsh.lazyjson import LazyJSON +from xonsh import lazyjson from xonsh.environ import Env -import xonsh.history.main as xhm +from xonsh.history import History +from xonsh.history import _info as history_info - -DEFAULT_MERGE_ENVS = ("replay", "native") +DEFAULT_MERGE_ENVS = ('replay', 'native') class Replayer(object): @@ -27,119 +25,95 @@ def __init__(self, f, reopen=True): Whether new file handle should be opened for each load, passed directly into LazyJSON class. """ - self._lj = LazyJSON(f, reopen=reopen) + self._lj = lazyjson.LazyJSON(f, reopen=reopen) def __del__(self): self._lj.close() def replay(self, merge_envs=DEFAULT_MERGE_ENVS, target=None): - """Replays the history specified, returns the history object where the code + """Replays the history specified, returns the history object where the code was executed. Parameters ---------- merge_env : tuple of str or Mappings, optional - Describes how to merge the environments, in order of increasing precedence. + Describes how to merge the environments, in order of increasing precednce. Available strings are 'replay' and 'native'. 
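The precedence rule described here is plain dict.update() layering: each source in merge_envs is applied in order, so later sources win on conflicting keys. A self-contained sketch of _merge_envs' core:

    def merge_envs(order, replay_env, native_env):
        # Later entries in `order` take precedence, as documented above.
        new_env = {}
        for e in order:
            if e == "replay":
                new_env.update(replay_env)
            elif e == "native":
                new_env.update(native_env)
            elif isinstance(e, dict):             # the real code accepts any Mapping
                new_env.update(e)
            else:
                raise TypeError("Type of env not understood: {0!r}".format(e))
        return new_env

    # With ('replay', 'native'), native values override replayed ones:
    print(merge_envs(("replay", "native"), {"A": "old", "B": "1"}, {"A": "new"}))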
The 'replay' env comes from the history file that we are replaying. The 'native' env comes from what this - instance of xonsh was started up with. Instead of a string, a dict or other + instance of xonsh was started up with. Instead of a string, a dict or other mapping may be passed in as well. Defaults to ('replay', 'native'). target : str, optional Path to new history file. """ - shell = builtins.__xonsh__.shell - re_env = self._lj["env"].load() + shell = builtins.__xonsh_shell__ + re_env = self._lj['env'].load() new_env = self._merge_envs(merge_envs, re_env) - new_hist = xhm.construct_history( - env=new_env.detype(), - locked=True, - ts=[time.time(), None], - gc=False, - filename=target, - ) - with swap(builtins.__xonsh__, "env", new_env), swap( - builtins.__xonsh__, "history", new_hist - ): - for cmd in self._lj["cmds"]: - inp = cmd["inp"] + new_hist = History(env=new_env.detype(), locked=True, ts=[time.time(), None], + gc=False, filename=target) + with swap(builtins, '__xonsh_env__', new_env), \ + swap(builtins, '__xonsh_history__', new_hist): + for cmd in self._lj['cmds']: + inp = cmd['inp'] shell.default(inp) - if builtins.__xonsh__.exit: # prevent premature exit - builtins.__xonsh__.exit = False + if builtins.__xonsh_exit__: # prevent premature exit + builtins.__xonsh_exit__ = False new_hist.flush(at_exit=True) return new_hist def _merge_envs(self, merge_envs, re_env): new_env = {} for e in merge_envs: - if e == "replay": + if e == 'replay': new_env.update(re_env) - elif e == "native": - new_env.update(builtins.__xonsh__.env) - elif isinstance(e, cabc.Mapping): + elif e == 'native': + new_env.update(builtins.__xonsh_env__) + elif isinstance(e, Mapping): new_env.update(e) else: - raise TypeError("Type of env not understood: {0!r}".format(e)) + raise TypeError('Type of env not understood: {0!r}'.format(e)) new_env = Env(**new_env) return new_env _REPLAY_PARSER = None - -def replay_create_parser(p=None): +def _create_parser(p=None): global _REPLAY_PARSER - p_was_none = p is None + p_was_none = (p is None) if _REPLAY_PARSER is not None and p_was_none: return _REPLAY_PARSER if p_was_none: from argparse import ArgumentParser - - p = ArgumentParser("replay", description="replays a xonsh history file") - p.add_argument( - "--merge-envs", - dest="merge_envs", - default=DEFAULT_MERGE_ENVS, - nargs="+", - help="Describes how to merge the environments, in order of " - "increasing precedence. Available strings are 'replay' and " - "'native'. The 'replay' env comes from the history file that we " - "are replaying. The 'native' env comes from what this instance " - "of xonsh was started up with. One or more of these options may " - "be passed in. Defaults to '--merge-envs replay native'.", - ) - p.add_argument( - "--json", - dest="json", - default=False, - action="store_true", - help="print history info in JSON format", - ) - p.add_argument( - "-o", "--target", dest="target", default=None, help="path to new history file" - ) - p.add_argument("path", help="path to replay history file") + p = ArgumentParser('replay', description='replays a xonsh history file') + p.add_argument('--merge-envs', dest='merge_envs', default=DEFAULT_MERGE_ENVS, + nargs='+', + help="Describes how to merge the environments, in order of " + "increasing precedence. Available strings are 'replay' and " + "'native'. The 'replay' env comes from the history file that we " + "are replaying. The 'native' env comes from what this instance " + "of xonsh was started up with. One or more of these options may " + "be passed in. 
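_REPLAY_PARSER is a build-once module global: the first call constructs the ArgumentParser and later calls reuse it, so repeated replay invocations in one session pay the construction cost only once. The pattern in isolation (argument list trimmed to one option):

    from argparse import ArgumentParser

    _PARSER = None

    def create_parser():
        global _PARSER
        if _PARSER is None:                        # construct exactly once
            _PARSER = ArgumentParser("replay",
                                     description="replays a xonsh history file")
            _PARSER.add_argument("path", help="path to replay history file")
        return _PARSER

    ns = create_parser().parse_args(["hist.json"])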
Defaults to '--merge-envs replay native'.") + p.add_argument('--json', dest='json', default=False, action='store_true', + help='print history info in JSON format') + p.add_argument('-o', '--target', dest='target', default=None, + help='path to new history file') + p.add_argument('path', help='path to replay history file') if p_was_none: _REPLAY_PARSER = p return p -def replay_main_action(h, ns, stdout=None, stderr=None): +def _main_action(ns, h=None): replayer = Replayer(ns.path) hist = replayer.replay(merge_envs=ns.merge_envs, target=ns.target) - print("----------------------------------------------------------------") - print("Just replayed history, new history has the following information") - print("----------------------------------------------------------------") - data = hist.info() - if ns.json: - s = json.dumps(data) - print(s, file=stdout) - else: - lines = ["{0}: {1}".format(k, v) for k, v in data.items()] - print("\n".join(lines), file=stdout) - - -def replay_main(args, stdin=None): + print('----------------------------------------------------------------') + print('Just replayed history, new history has the following information') + print('----------------------------------------------------------------') + history_info(ns, hist) + + +def main(args, stdin=None): """Acts as main function for replaying a xonsh history file.""" - parser = replay_create_parser() + parser = _create_parser() ns = parser.parse_args(args) - replay_main_action(ns) + _main_action(ns) diff --git a/xonsh/shell.py b/xonsh/shell.py index 18f09ae..1779dd1 100644 --- a/xonsh/shell.py +++ b/xonsh/shell.py @@ -1,112 +1,19 @@ -# -*- coding: utf-8 -*- """The xonsh shell""" -import sys -import random -import time -import difflib import builtins -import warnings +from warnings import warn -from xonsh.platform import ( - best_shell_type, - has_prompt_toolkit, - ptk_above_min_supported, - ptk_shell_type, -) -from xonsh.tools import XonshError, print_exception -from xonsh.events import events -import xonsh.history.main as xhm +from xonsh.execer import Execer +from xonsh.environ import xonshrc_context +from xonsh.tools import XonshError -events.doc( - "on_transform_command", - """ -on_transform_command(cmd: str) -> str - -Fired to request xontribs to transform a command line. Return the transformed -command, or the same command if no transformation occurs. Only done for -interactive sessions. - -This may be fired multiple times per command, with other transformers input or -output, so design any handlers for this carefully. -""", -) - -events.doc( - "on_precommand", - """ -on_precommand(cmd: str) -> None - -Fires just before a command is executed. -""", -) - -events.doc( - "on_postcommand", - """ -on_postcommand(cmd: str, rtn: int, out: str or None, ts: list) -> None - -Fires just after a command is executed. The arguments are the same as history. 
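Handlers subscribe to these events by decorating a function with the event of the same name; the decorated function should accept the documented arguments. A sketch of a subscriber, assuming the xonsh.events API documented above:

    from xonsh.events import events

    @events.on_postcommand
    def log_failures(cmd, rtn, out, ts, **kwargs):
        # Fired after each interactive command, per the doc above.
        if rtn != 0:
            print("command exited {}: {}".format(rtn, cmd))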
- -Parameters: - -* ``cmd``: The command that was executed (after transformation) -* ``rtn``: The result of the command executed (``0`` for success) -* ``out``: If xonsh stores command output, this is the output -* ``ts``: Timestamps, in the order of ``[starting, ending]`` -""", -) - -events.doc( - "on_pre_prompt", - """ -on_pre_prompt() -> None - -Fires just before the prompt is shown -""", -) - -events.doc( - "on_post_prompt", - """ -on_post_prompt() -> None - -Fires just after the prompt returns -""", -) - - -def transform_command(src, show_diff=True): - """Returns the results of firing the precommand handles.""" - i = 0 - limit = sys.getrecursionlimit() - lst = "" - raw = src - while src != lst: - lst = src - srcs = events.on_transform_command.fire(cmd=src) - for s in srcs: - if s != lst: - src = s - break - i += 1 - if i == limit: - print_exception( - "Modifications to source input took more than " - "the recursion limit number of iterations to " - "converge." - ) - debug_level = builtins.__xonsh__.env.get("XONSH_DEBUG") - if show_diff and debug_level > 1 and src != raw: - sys.stderr.writelines( - difflib.unified_diff( - raw.splitlines(keepends=True), - src.splitlines(keepends=True), - fromfile="before precommand event", - tofile="after precommand event", - ) - ) - return src +def is_prompt_toolkit_available(): + """Checks if prompt_toolkit is available to import.""" + try: + import prompt_toolkit + return True + except ImportError: + return False class Shell(object): @@ -116,98 +23,43 @@ class Shell(object): readline version of shell should be used. """ - shell_type_aliases = { - "b": "best", - "best": "best", - "d": "dumb", - "dumb": "dumb", - "ptk": "prompt_toolkit", - "ptk1": "prompt_toolkit1", - "ptk2": "prompt_toolkit2", - "prompt-toolkit": "prompt_toolkit", - "prompt_toolkit": "prompt_toolkit", - "prompt-toolkit1": "prompt_toolkit1", - "prompt-toolkit2": "prompt_toolkit2", - "rand": "random", - "random": "random", - "rl": "readline", - "readline": "readline", - } - - def __init__(self, execer, ctx=None, shell_type=None, **kwargs): - """ - Parameters - ---------- - execer : Execer - An execer instance capable of running xonsh code. - ctx : Mapping, optional - The execution context for the shell (e.g. the globals namespace). - If none, this is computed by loading the rc files. If not None, - this no additional context is computed and this is used - directly. - shell_type : str, optional - The shell type to start, such as 'readline', 'prompt_toolkit1', - or 'random'. - """ - self.execer = execer - self.ctx = {} if ctx is None else ctx - env = builtins.__xonsh__.env - # build history backend before creating shell - builtins.__xonsh__.history = hist = xhm.construct_history( - env=env.detype(), ts=[time.time(), None], locked=True - ) - - # pick a valid shell -- if no shell is specified by the user, - # shell type is pulled from env - if shell_type is None: - shell_type = env.get("SHELL_TYPE") - if shell_type == "none": - # This bricks interactive xonsh - # Can happen from the use of .xinitrc, .xsession, etc - shell_type = "best" - shell_type = self.shell_type_aliases.get(shell_type, shell_type) - if shell_type == "best" or shell_type is None: - shell_type = best_shell_type() - elif env.get("TERM", "") == "dumb": - shell_type = "dumb" - elif shell_type == "random": - shell_type = random.choice(("readline", "prompt_toolkit")) - if shell_type == "prompt_toolkit": - if not has_prompt_toolkit(): - warnings.warn( - "prompt_toolkit is not available, using " "readline instead." 
- ) - shell_type = "readline" - elif not ptk_above_min_supported(): - warnings.warn( - "prompt-toolkit version < v1.0.0 is not " - "supported. Please update prompt-toolkit. Using " - "readline instead." - ) - shell_type = "readline" - else: - shell_type = ptk_shell_type() - self.shell_type = env["SHELL_TYPE"] = shell_type + def __init__(self, ctx=None, shell_type=None, **kwargs): + self._init_environ(ctx) + env = builtins.__xonsh_env__ + # pick a valid shell + if shell_type is not None: + env['SHELL_TYPE'] = shell_type + shell_type = env.get('SHELL_TYPE') + if shell_type == 'prompt_toolkit': + if not is_prompt_toolkit_available(): + warn('prompt_toolkit is not available, using readline instead.') + shell_type = env['SHELL_TYPE'] = 'readline' # actually make the shell - if shell_type == "none": - from xonsh.base_shell import BaseShell as shell_class - elif shell_type == "prompt_toolkit2": - from xonsh.ptk2.shell import PromptToolkit2Shell as shell_class - elif shell_type == "prompt_toolkit1": - from xonsh.ptk.shell import PromptToolkitShell as shell_class - elif shell_type == "readline": - from xonsh.readline_shell import ReadlineShell as shell_class - elif shell_type == "jupyter": - from xonsh.jupyter_shell import JupyterShell as shell_class - elif shell_type == "dumb": - from xonsh.dumb_shell import DumbShell as shell_class + if shell_type == 'prompt_toolkit': + from xonsh.prompt_toolkit_shell import PromptToolkitShell + self.shell = PromptToolkitShell(execer=self.execer, + ctx=self.ctx, **kwargs) + elif shell_type == 'readline': + from xonsh.readline_shell import ReadlineShell + self.shell = ReadlineShell(execer=self.execer, + ctx=self.ctx, **kwargs) else: - raise XonshError("{} is not recognized as a shell type".format(shell_type)) - self.shell = shell_class(execer=self.execer, ctx=self.ctx, **kwargs) - # allows history garbage collector to start running - if hist.gc is not None: - hist.gc.wait_for_shell = False + raise XonshError('{} is not recognized as a shell type'.format( + shell_type)) + # allows history garbace colector to start running + builtins.__xonsh_history__.gc.wait_for_shell = False def __getattr__(self, attr): """Delegates calls to appropriate shell instance.""" return getattr(self.shell, attr) + + def _init_environ(self, ctx): + self.execer = Execer() + env = builtins.__xonsh_env__ + if ctx is not None: + self.ctx = ctx + else: + rc = env.get('XONSHRC') + self.ctx = xonshrc_context(rcfiles=rc, execer=self.execer) + builtins.__xonsh_ctx__ = self.ctx + self.ctx['__name__'] = '__main__' diff --git a/xonsh/style_tools.py b/xonsh/style_tools.py deleted file mode 100644 index 8d2eee9..0000000 --- a/xonsh/style_tools.py +++ /dev/null @@ -1,446 +0,0 @@ -"""Xonsh color styling tools that simulate pygments, when it is unavailable.""" -import builtins -from collections import defaultdict - -from xonsh.platform import HAS_PYGMENTS -from xonsh.lazyasd import LazyObject -from xonsh.color_tools import RE_BACKGROUND -from xonsh.tools import FORMATTER - - -class _TokenType(tuple): - """ - Forked from the pygments project - https://bitbucket.org/birkenfeld/pygments-main - Copyright (c) 2006-2017 by the respective authors, All rights reserved. 
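The trick in _TokenType above is that attribute access manufactures, caches, and returns child token singletons, so e.g. Color.BOLD_RED springs into existence the first time it is named and is the same object ever after. A trimmed restatement of that behavior:

    class TokenType(tuple):
        parent = None

        def __getattr__(self, val):
            if not val or not val[0].isupper():
                return tuple.__getattribute__(self, val)
            new = TokenType(self + (val,))
            setattr(self, val, new)      # cache so later lookups bypass __getattr__
            new.parent = self
            return new

        def __repr__(self):
            return "Token" + (self and "." or "") + ".".join(self)

    Token = TokenType()
    assert Token.Color.RED is Token.Color.RED    # one singleton per dotted path
    print(Token.Color.RED)                       # -> Token.Color.RED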
- See https://bitbucket.org/birkenfeld/pygments-main/raw/05818a4ef9891d9ac22c851f7b3ea4b4fce460ab/AUTHORS - """ - - parent = None - - def split(self): - buf = [] - node = self - while node is not None: - buf.append(node) - node = node.parent - buf.reverse() - return buf - - def __init__(self, *args): - # no need to call super.__init__ - self.subtypes = set() - - def __contains__(self, val): - return self is val or (type(val) is self.__class__ and val[: len(self)] == self) - - def __getattr__(self, val): - if not val or not val[0].isupper(): - return tuple.__getattribute__(self, val) - new = _TokenType(self + (val,)) - setattr(self, val, new) - self.subtypes.add(new) - new.parent = self - return new - - def __repr__(self): - return "Token" + (self and "." or "") + ".".join(self) - - def __copy__(self): - # These instances are supposed to be singletons - return self - - def __deepcopy__(self, memo): - # These instances are supposed to be singletons - return self - - -Token = _TokenType() -Color = Token.Color - - -def partial_color_tokenize(template): - """Tokenizes a template string containing colors. Will return a list - of tuples mapping the token to the string which has that color. - These sub-strings maybe templates themselves. - """ - if HAS_PYGMENTS and builtins.__xonsh__.shell is not None: - styles = __xonsh__.shell.shell.styler.styles - elif builtins.__xonsh__.shell is not None: - styles = DEFAULT_STYLE_DICT - else: - styles = None - color = Color.NO_COLOR - try: - toks, color = _partial_color_tokenize_main(template, styles) - except Exception: - toks = [(Color.NO_COLOR, template)] - if styles is not None: - styles[color] # ensure color is available - return toks - - -def _partial_color_tokenize_main(template, styles): - bopen = "{" - bclose = "}" - colon = ":" - expl = "!" - color = Color.NO_COLOR - fg = bg = None - value = "" - toks = [] - for literal, field, spec, conv in FORMATTER.parse(template): - if field is None: - value += literal - elif field in KNOWN_COLORS or "#" in field: - value += literal - next_color, fg, bg = color_by_name(field, fg, bg) - if next_color is not color: - if len(value) > 0: - toks.append((color, value)) - if styles is not None: - styles[color] # ensure color is available - color = next_color - value = "" - elif field is not None: - parts = [literal, bopen, field] - if conv is not None and len(conv) > 0: - parts.append(expl) - parts.append(conv) - if spec is not None and len(spec) > 0: - parts.append(colon) - parts.append(spec) - parts.append(bclose) - value += "".join(parts) - else: - value += literal - toks.append((color, value)) - return toks, color - - -def color_by_name(name, fg=None, bg=None): - """Converts a color name to a color token, foreground name, - and background name. Will take into consideration current foreground - and background colors, if provided. - - Parameters - ---------- - name : str - Color name. - fg : str, optional - Foreground color name. - bg : str, optional - Background color name. - - Returns - ------- - tok : Token - Pygments Token.Color subclass - fg : str or None - New computed foreground color name. - bg : str or None - New computed background color name. 
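Concretely, color_by_name() classifies the incoming field as foreground or background and folds it into the running pair, producing a combined token name like RED__BACKGROUND_YELLOW when both are set. A simplified restatement of that naming rule (the HEX normalization from norm_name() is omitted):

    import re

    BG = re.compile(r"BACKGROUND_")

    def combine(name, fg=None, bg=None):
        name = name.upper()
        if name == "NO_COLOR":
            return "NO_COLOR", None, None
        if BG.search(name):
            bg = name                    # field names a background color
        else:
            fg = name                    # otherwise it is a foreground color
        if fg is not None and bg is not None:
            tokname = fg + "__" + bg
        else:
            tokname = fg if fg is not None else bg
        return tokname, fg, bg

    print(combine("BACKGROUND_YELLOW", fg="RED"))
    # -> ('RED__BACKGROUND_YELLOW', 'RED', 'BACKGROUND_YELLOW')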
- """ - name = name.upper() - if name == "NO_COLOR": - return Color.NO_COLOR, None, None - m = RE_BACKGROUND.search(name) - if m is None: # must be foreground color - fg = norm_name(name) - else: - bg = norm_name(name) - # assemble token - if fg is None and bg is None: - tokname = "NO_COLOR" - elif fg is None: - tokname = bg - elif bg is None: - tokname = fg - else: - tokname = fg + "__" + bg - tok = getattr(Color, tokname) - return tok, fg, bg - - -def norm_name(name): - """Normalizes a color name.""" - return name.replace("#", "HEX").replace("BGHEX", "BACKGROUND_HEX") - - -KNOWN_COLORS = LazyObject( - lambda: frozenset( - [ - "BACKGROUND_BLACK", - "BACKGROUND_BLUE", - "BACKGROUND_CYAN", - "BACKGROUND_GREEN", - "BACKGROUND_INTENSE_BLACK", - "BACKGROUND_INTENSE_BLUE", - "BACKGROUND_INTENSE_CYAN", - "BACKGROUND_INTENSE_GREEN", - "BACKGROUND_INTENSE_PURPLE", - "BACKGROUND_INTENSE_RED", - "BACKGROUND_INTENSE_WHITE", - "BACKGROUND_INTENSE_YELLOW", - "BACKGROUND_PURPLE", - "BACKGROUND_RED", - "BACKGROUND_WHITE", - "BACKGROUND_YELLOW", - "BLACK", - "BLUE", - "BOLD_BLACK", - "BOLD_BLUE", - "BOLD_CYAN", - "BOLD_GREEN", - "BOLD_INTENSE_BLACK", - "BOLD_INTENSE_BLUE", - "BOLD_INTENSE_CYAN", - "BOLD_INTENSE_GREEN", - "BOLD_INTENSE_PURPLE", - "BOLD_INTENSE_RED", - "BOLD_INTENSE_WHITE", - "BOLD_INTENSE_YELLOW", - "BOLD_PURPLE", - "BOLD_RED", - "BOLD_UNDERLINE_BLACK", - "BOLD_UNDERLINE_BLUE", - "BOLD_UNDERLINE_CYAN", - "BOLD_UNDERLINE_GREEN", - "BOLD_UNDERLINE_INTENSE_BLACK", - "BOLD_UNDERLINE_INTENSE_BLUE", - "BOLD_UNDERLINE_INTENSE_CYAN", - "BOLD_UNDERLINE_INTENSE_GREEN", - "BOLD_UNDERLINE_INTENSE_PURPLE", - "BOLD_UNDERLINE_INTENSE_RED", - "BOLD_UNDERLINE_INTENSE_WHITE", - "BOLD_UNDERLINE_INTENSE_YELLOW", - "BOLD_UNDERLINE_PURPLE", - "BOLD_UNDERLINE_RED", - "BOLD_UNDERLINE_WHITE", - "BOLD_UNDERLINE_YELLOW", - "BOLD_WHITE", - "BOLD_YELLOW", - "CYAN", - "GREEN", - "INTENSE_BLACK", - "INTENSE_BLUE", - "INTENSE_CYAN", - "INTENSE_GREEN", - "INTENSE_PURPLE", - "INTENSE_RED", - "INTENSE_WHITE", - "INTENSE_YELLOW", - "NO_COLOR", - "PURPLE", - "RED", - "UNDERLINE_BLACK", - "UNDERLINE_BLUE", - "UNDERLINE_CYAN", - "UNDERLINE_GREEN", - "UNDERLINE_INTENSE_BLACK", - "UNDERLINE_INTENSE_BLUE", - "UNDERLINE_INTENSE_CYAN", - "UNDERLINE_INTENSE_GREEN", - "UNDERLINE_INTENSE_PURPLE", - "UNDERLINE_INTENSE_RED", - "UNDERLINE_INTENSE_WHITE", - "UNDERLINE_INTENSE_YELLOW", - "UNDERLINE_PURPLE", - "UNDERLINE_RED", - "UNDERLINE_WHITE", - "UNDERLINE_YELLOW", - "WHITE", - "YELLOW", - ] - ), - globals(), - "KNOWN_COLORS", -) - -DEFAULT_STYLE_DICT = LazyObject( - lambda: defaultdict( - lambda: "", - { - Token: "", - Token.Aborted: "ansibrightblack", - Token.AutoSuggestion: "ansibrightblack", - Token.Color.BACKGROUND_BLACK: "bg:ansiblack", - Token.Color.BACKGROUND_BLUE: "bg:ansiblue", - Token.Color.BACKGROUND_CYAN: "bg:ansicyan", - Token.Color.BACKGROUND_GREEN: "bg:ansigreen", - Token.Color.BACKGROUND_INTENSE_BLACK: "bg:ansibrightblack", - Token.Color.BACKGROUND_INTENSE_BLUE: "bg:ansibrightblue", - Token.Color.BACKGROUND_INTENSE_CYAN: "bg:ansibrightcyan", - Token.Color.BACKGROUND_INTENSE_GREEN: "bg:ansibrightgreen", - Token.Color.BACKGROUND_INTENSE_PURPLE: "bg:ansibrightmagenta", - Token.Color.BACKGROUND_INTENSE_RED: "bg:ansibrightred", - Token.Color.BACKGROUND_INTENSE_WHITE: "bg:ansiwhite", - Token.Color.BACKGROUND_INTENSE_YELLOW: "bg:ansibrightyellow", - Token.Color.BACKGROUND_PURPLE: "bg:ansimagenta", - Token.Color.BACKGROUND_RED: "bg:ansired", - Token.Color.BACKGROUND_WHITE: "bg:ansigray", - Token.Color.BACKGROUND_YELLOW: 
"bg:ansiyellow", - Token.Color.BLACK: "ansiblack", - Token.Color.BLUE: "ansiblue", - Token.Color.BOLD_BLACK: "bold ansiblack", - Token.Color.BOLD_BLUE: "bold ansiblue", - Token.Color.BOLD_CYAN: "bold ansicyan", - Token.Color.BOLD_GREEN: "bold ansigreen", - Token.Color.BOLD_INTENSE_BLACK: "bold ansibrightblack", - Token.Color.BOLD_INTENSE_BLUE: "bold ansibrightblue", - Token.Color.BOLD_INTENSE_CYAN: "bold ansibrightcyan", - Token.Color.BOLD_INTENSE_GREEN: "bold ansibrightgreen", - Token.Color.BOLD_INTENSE_PURPLE: "bold ansibrightmagenta", - Token.Color.BOLD_INTENSE_RED: "bold ansibrightred", - Token.Color.BOLD_INTENSE_WHITE: "bold ansiwhite", - Token.Color.BOLD_INTENSE_YELLOW: "bold ansibrightyellow", - Token.Color.BOLD_PURPLE: "bold ansimagenta", - Token.Color.BOLD_RED: "bold ansired", - Token.Color.BOLD_UNDERLINE_BLACK: "bold underline ansiblack", - Token.Color.BOLD_UNDERLINE_BLUE: "bold underline ansiblue", - Token.Color.BOLD_UNDERLINE_CYAN: "bold underline ansicyan", - Token.Color.BOLD_UNDERLINE_GREEN: "bold underline ansigreen", - Token.Color.BOLD_UNDERLINE_INTENSE_BLACK: "bold underline ansibrightblack", - Token.Color.BOLD_UNDERLINE_INTENSE_BLUE: "bold underline ansibrightblue", - Token.Color.BOLD_UNDERLINE_INTENSE_CYAN: "bold underline ansibrightcyan", - Token.Color.BOLD_UNDERLINE_INTENSE_GREEN: "bold underline ansibrightgreen", - Token.Color.BOLD_UNDERLINE_INTENSE_PURPLE: "bold underline ansibrightmagenta", - Token.Color.BOLD_UNDERLINE_INTENSE_RED: "bold underline ansibrightred", - Token.Color.BOLD_UNDERLINE_INTENSE_WHITE: "bold underline ansiwhite", - Token.Color.BOLD_UNDERLINE_INTENSE_YELLOW: "bold underline ansibrightyellow", - Token.Color.BOLD_UNDERLINE_PURPLE: "bold underline ansimagenta", - Token.Color.BOLD_UNDERLINE_RED: "bold underline ansired", - Token.Color.BOLD_UNDERLINE_WHITE: "bold underline ansigray", - Token.Color.BOLD_UNDERLINE_YELLOW: "bold underline ansiyellow", - Token.Color.BOLD_WHITE: "bold ansigray", - Token.Color.BOLD_YELLOW: "bold ansiyellow", - Token.Color.CYAN: "ansicyan", - Token.Color.GREEN: "ansigreen", - Token.Color.INTENSE_BLACK: "ansibrightblack", - Token.Color.INTENSE_BLUE: "ansibrightblue", - Token.Color.INTENSE_CYAN: "ansibrightcyan", - Token.Color.INTENSE_GREEN: "ansibrightgreen", - Token.Color.INTENSE_PURPLE: "ansibrightmagenta", - Token.Color.INTENSE_RED: "ansibrightred", - Token.Color.INTENSE_WHITE: "ansiwhite", - Token.Color.INTENSE_YELLOW: "ansibrightyellow", - Token.Color.NO_COLOR: "noinherit", - Token.Color.PURPLE: "ansimagenta", - Token.Color.RED: "ansired", - Token.Color.UNDERLINE_BLACK: "underline ansiblack", - Token.Color.UNDERLINE_BLUE: "underline ansiblue", - Token.Color.UNDERLINE_CYAN: "underline ansicyan", - Token.Color.UNDERLINE_GREEN: "underline ansigreen", - Token.Color.UNDERLINE_INTENSE_BLACK: "underline ansibrightblack", - Token.Color.UNDERLINE_INTENSE_BLUE: "underline ansibrightblue", - Token.Color.UNDERLINE_INTENSE_CYAN: "underline ansibrightcyan", - Token.Color.UNDERLINE_INTENSE_GREEN: "underline ansibrightgreen", - Token.Color.UNDERLINE_INTENSE_PURPLE: "underline ansibrightmagenta", - Token.Color.UNDERLINE_INTENSE_RED: "underline ansibrightred", - Token.Color.UNDERLINE_INTENSE_WHITE: "underline ansiwhite", - Token.Color.UNDERLINE_INTENSE_YELLOW: "underline ansibrightyellow", - Token.Color.UNDERLINE_PURPLE: "underline ansimagenta", - Token.Color.UNDERLINE_RED: "underline ansired", - Token.Color.UNDERLINE_WHITE: "underline ansigray", - Token.Color.UNDERLINE_YELLOW: "underline ansiyellow", - Token.Color.WHITE: "ansigray", - 
Token.Color.YELLOW: "ansiyellow", - Token.Comment: "underline ansicyan", - Token.Comment.Hashbang: "", - Token.Comment.Multiline: "", - Token.Comment.Preproc: "underline ansiyellow", - Token.Comment.PreprocFile: "", - Token.Comment.Single: "", - Token.Comment.Special: "", - Token.Error: "ansibrightred", - Token.Escape: "", - Token.Generic: "", - Token.Generic.Deleted: "ansired", - Token.Generic.Emph: "underline", - Token.Generic.Error: "bold ansibrightred", - Token.Generic.Heading: "bold ansiblue", - Token.Generic.Inserted: "ansibrightgreen", - Token.Generic.Output: "ansiblue", - Token.Generic.Prompt: "bold ansiblue", - Token.Generic.Strong: "", - Token.Generic.Subheading: "bold ansimagenta", - Token.Generic.Traceback: "ansiblue", - Token.Keyword: "bold ansigreen", - Token.Keyword.Constant: "", - Token.Keyword.Declaration: "", - Token.Keyword.Namespace: "", - Token.Keyword.Pseudo: "nobold", - Token.Keyword.Reserved: "", - Token.Keyword.Type: "nobold ansired", - Token.Literal: "", - Token.Literal.Date: "", - Token.Literal.Number: "ansibrightblack", - Token.Literal.Number.Bin: "", - Token.Literal.Number.Float: "", - Token.Literal.Number.Hex: "", - Token.Literal.Number.Integer: "", - Token.Literal.Number.Integer.Long: "", - Token.Literal.Number.Oct: "", - Token.Literal.String: "ansibrightred", - Token.Literal.String.Affix: "", - Token.Literal.String.Backtick: "", - Token.Literal.String.Char: "", - Token.Literal.String.Delimiter: "", - Token.Literal.String.Doc: "underline", - Token.Literal.String.Double: "", - Token.Literal.String.Escape: "bold ansiyellow", - Token.Literal.String.Heredoc: "", - Token.Literal.String.Interpol: "bold ansimagenta", - Token.Literal.String.Other: "ansigreen", - Token.Literal.String.Regex: "ansimagenta", - Token.Literal.String.Single: "", - Token.Literal.String.Symbol: "ansiyellow", - Token.Menu.Completions: "bg:ansigray ansiblack", - Token.Menu.Completions.Completion: "", - Token.Menu.Completions.Completion.Current: "bg:ansibrightblack ansiwhite", - Token.Name: "", - Token.Name.Attribute: "ansibrightyellow", - Token.Name.Builtin: "ansigreen", - Token.Name.Builtin.Pseudo: "", - Token.Name.Class: "bold ansibrightblue", - Token.Name.Constant: "ansired", - Token.Name.Decorator: "ansibrightmagenta", - Token.Name.Entity: "bold ansigray", - Token.Name.Exception: "bold ansibrightred", - Token.Name.Function: "ansibrightblue", - Token.Name.Function.Magic: "", - Token.Name.Label: "ansibrightyellow", - Token.Name.Namespace: "bold ansibrightblue", - Token.Name.Other: "", - Token.Name.Property: "", - Token.Name.Tag: "bold ansigreen", - Token.Name.Variable: "ansiblue", - Token.Name.Variable.Class: "", - Token.Name.Variable.Global: "", - Token.Name.Variable.Instance: "", - Token.Name.Variable.Magic: "", - Token.Operator: "ansibrightblack", - Token.Operator.Word: "bold ansimagenta", - Token.Other: "", - Token.Punctuation: "", - Token.Scrollbar: "bg:ansibrightblack", - Token.Scrollbar.Arrow: "bg:ansiblack ansiwhite bold", - Token.Scrollbar.Button: "bg:ansiblack", - Token.Text: "", - Token.Text.Whitespace: "ansigray", - }, - ), - globals(), - "DEFAULT_STYLE_DICT", -) - -PTK2_STYLE = { - "completion-menu": "bg:ansicyan ansiwhite", - "completion-menu.completion": "bg:#008888 #ffffff", - "completion-menu.completion.current": "bg:ansibrightblack ansiwhite", - "completion-menu.meta.completion": "bg:#00aaaa #ffffff", - "completion-menu.meta.completion.current": "bg:#00aaaa #000000", - "scrollbar.background": "bg:ansibrightblack", - "scrollbar.arrow": "bg:ansiblack ansiwhite bold", - 
"scrollbar.button": "bg:ansiblack", -} diff --git a/xonsh/teepty.py b/xonsh/teepty.py new file mode 100644 index 0000000..f5b9929 --- /dev/null +++ b/xonsh/teepty.py @@ -0,0 +1,331 @@ +"""This implements a psuedo-TTY that tees its output into a Python buffer. + +This file was forked from a version distibuted under an MIT license and +Copyright (c) 2011 Joshua D. Bartlett. +See http://sqizit.bartletts.id.au/2011/02/14/pseudo-terminals-in-python/ for +more information. +""" +import io +import re +import os +import sys +import tty +import pty +import time +import array +import fcntl +import select +import signal +import termios +import tempfile +import threading + +# The following escape codes are xterm codes. +# See http://rtfm.etla.org/xterm/ctlseq.html for more. +MODE_NUMS = ('1049', '47', '1047') +START_ALTERNATE_MODE = frozenset('\x1b[?{0}h'.format(i).encode() for i in MODE_NUMS) +END_ALTERNATE_MODE = frozenset('\x1b[?{0}l'.format(i).encode() for i in MODE_NUMS) +ALTERNATE_MODE_FLAGS = tuple(START_ALTERNATE_MODE) + tuple(END_ALTERNATE_MODE) + +RE_HIDDEN = re.compile(b'(\001.*?\002)') +RE_COLOR = re.compile(b'\033\[\d+;?\d*m') + +def _findfirst(s, substrs): + """Finds whichever of the given substrings occurs first in the given string + and returns that substring, or returns None if no such strings occur. + """ + i = len(s) + result = None + for substr in substrs: + pos = s.find(substr) + if -1 < pos < i: + i = pos + result = substr + return i, result + + +def _on_main_thread(): + """Checks if we are on the main thread or not. Duplicated from xonsh.tools + here so that this module only relies on the Python standrd library. + """ + return threading.current_thread() is threading.main_thread() + + +def _find_error_code(e): + """Gets the approriate error code for an exception e, see + http://tldp.org/LDP/abs/html/exitcodes.html for exit codes. + """ + if isinstance(e, PermissionError): + code = 126 + elif isinstance(e, FileNotFoundError): + code = 127 + else: + code = 1 + return code + + +class TeePTY(object): + """This class is a pseudo terminal that tees the stdout and stderr into a buffer.""" + + def __init__(self, bufsize=1024, remove_color=True, encoding='utf-8', + errors='strict'): + """ + Parameters + ---------- + bufsize : int, optional + The buffer size to read from the root terminal to/from the tee'd terminal. + remove_color : bool, optional + Removes color codes from the tee'd buffer, though not the TTY. + encoding : str, optional + The encoding to use when decoding into a str. + errors : str, optional + The encoding error flag to use when decoding into a str. + """ + self.bufsize = bufsize + self.pid = self.master_fd = None + self._in_alt_mode = False + self.remove_color = remove_color + self.encoding = encoding + self.errors = errors + self.buffer = io.BytesIO() + self.returncode = None + self._temp_stdin = None + + def __str__(self): + return self.buffer.getvalue().decode(encoding=self.encoding, + errors=self.errors) + + def __del__(self): + if self._temp_stdin is not None: + self._temp_stdin.close() + self._temp_stdin = None + + def spawn(self, argv=None, env=None, stdin=None, delay=None): + """Create a spawned process. Based on the code for pty.spawn(). + This cannot be used except from the main thread. + + Parameters + ---------- + argv : list of str, optional + Arguments to pass in as subprocess. In None, will execute $SHELL. + env : Mapping, optional + Environment to pass execute in. + delay : float, optional + Delay timing before executing process if piping in data. 
+            is passed into time.sleep() so it is in [seconds]. If delay is None,
+            its value will be looked up from the environment
+            variable $TEEPTY_PIPE_DELAY, in the passed-in env or os.environ.
+            If not present or not positive, no delay is used.
+
+        Returns
+        -------
+        returncode : int
+            Return code for the spawned process.
+        """
+        assert self.master_fd is None
+        self._in_alt_mode = False
+        if not argv:
+            argv = [os.environ.get('SHELL', 'sh')]
+        argv = self._put_stdin_in_argv(argv, stdin)
+
+        pid, master_fd = pty.fork()
+        self.pid = pid
+        self.master_fd = master_fd
+        if pid == pty.CHILD:
+            # determine if a piping delay is needed.
+            if self._temp_stdin is not None:
+                self._delay_for_pipe(env=env, delay=delay)
+            # ok, go
+            try:
+                if env is None:
+                    os.execvp(argv[0], argv)
+                else:
+                    os.execvpe(argv[0], argv, env)
+            except OSError as e:
+                os._exit(_find_error_code(e))
+        else:
+            self._pipe_stdin(stdin)
+
+        on_main_thread = _on_main_thread()
+        if on_main_thread:
+            old_handler = signal.signal(signal.SIGWINCH, self._signal_winch)
+        try:
+            mode = tty.tcgetattr(pty.STDIN_FILENO)
+            tty.setraw(pty.STDIN_FILENO)
+            restore = True
+        except tty.error:  # This is the same as termios.error
+            restore = False
+        self._init_fd()
+        try:
+            self._copy()
+        except (IOError, OSError):
+            if restore:
+                tty.tcsetattr(pty.STDIN_FILENO, tty.TCSAFLUSH, mode)
+
+        _, self.returncode = os.waitpid(pid, 0)
+        os.close(master_fd)
+        self.master_fd = None
+        self._in_alt_mode = False
+        if on_main_thread:
+            signal.signal(signal.SIGWINCH, old_handler)
+        return self.returncode
+
+    def _init_fd(self):
+        """Called once when the pty is first set up."""
+        self._set_pty_size()
+
+    def _signal_winch(self, signum, frame):
+        """Signal handler for SIGWINCH - window size has changed."""
+        self._set_pty_size()
+
+    def _set_pty_size(self):
+        """Sets the window size of the child pty based on the window size of
+        our own controlling terminal.
+        """
+        assert self.master_fd is not None
+        # Get the terminal size of the real terminal, set it on the
+        # pseudoterminal.
+        buf = array.array('h', [0, 0, 0, 0])
+        fcntl.ioctl(pty.STDOUT_FILENO, termios.TIOCGWINSZ, buf, True)
+        fcntl.ioctl(self.master_fd, termios.TIOCSWINSZ, buf)
+
+    def _copy(self):
+        """Main select loop. Passes all data to self.write_stdout() or self.write_stdin().
+        """
+        assert self.master_fd is not None
+        master_fd = self.master_fd
+        bufsize = self.bufsize
+        while True:
+            try:
+                rfds, wfds, xfds = select.select([master_fd, pty.STDIN_FILENO], [], [])
+            except OSError as e:
+                if e.errno == 4:  # Interrupted system call.
+                    continue      # This happens at terminal resize.
+            if master_fd in rfds:
+                data = os.read(master_fd, bufsize)
+                self.write_stdout(data)
+            if pty.STDIN_FILENO in rfds:
+                data = os.read(pty.STDIN_FILENO, bufsize)
+                self.write_stdin(data)
+
+    def _sanatize_data(self, data):
+        i, flag = _findfirst(data, ALTERNATE_MODE_FLAGS)
+        if flag is None and self._in_alt_mode:
+            return b''
+        elif flag is not None:
+            if flag in START_ALTERNATE_MODE:
+                # This code is executed when the child process switches the terminal into
+                # alternate mode. The line below assumes that the user has opened vim,
+                # less, or similar, and writes to stdin.
+                d0 = data[:i]
+                self._in_alt_mode = True
+                d1 = self._sanatize_data(data[i+len(flag):])
+                data = d0 + d1
+            elif flag in END_ALTERNATE_MODE:
+                # This code is executed when the child process switches the terminal back
+                # out of alternate mode. The line below assumes that the user has
+                # returned to the command prompt.
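+                # (Illustrative note: with MODE_NUMS above, the start flags are
+                # b'\x1b[?1049h', b'\x1b[?47h', and b'\x1b[?1047h'; the matching
+                # end flags swap the trailing 'h' for 'l'. While in alternate
+                # mode, everything is dropped from the tee'd buffer.)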
+                self._in_alt_mode = False
+                data = self._sanatize_data(data[i+len(flag):])
+        data = RE_HIDDEN.sub(b'', data)
+        if self.remove_color:
+            data = RE_COLOR.sub(b'', data)
+        return data
+
+    def write_stdout(self, data):
+        """Writes to stdout as if the child process had written the data (bytes)."""
+        os.write(pty.STDOUT_FILENO, data)  # write to real terminal
+        # tee to buffer
+        data = self._sanatize_data(data)
+        if len(data) > 0:
+            self.buffer.write(data)
+
+    def write_stdin(self, data):
+        """Writes to the child process from its controlling terminal."""
+        master_fd = self.master_fd
+        assert master_fd is not None
+        while len(data) > 0:
+            n = os.write(master_fd, data)
+            data = data[n:]
+
+    def _stdin_filename(self, stdin):
+        if stdin is None:
+            rtn = None
+        elif isinstance(stdin, io.FileIO) and os.path.isfile(stdin.name):
+            rtn = stdin.name
+        elif isinstance(stdin, (io.BufferedIOBase, str, bytes)):
+            self._temp_stdin = tsi = tempfile.NamedTemporaryFile()
+            rtn = tsi.name
+        else:
+            raise ValueError('stdin not understood {0!r}'.format(stdin))
+        return rtn
+
+    def _put_stdin_in_argv(self, argv, stdin):
+        stdin_filename = self._stdin_filename(stdin)
+        if stdin_filename is None:
+            return argv
+        argv = list(argv)
+        # a lone dash '-' argument means stdin
+        if argv.count('-') == 0:
+            argv.append(stdin_filename)
+        else:
+            argv[argv.index('-')] = stdin_filename
+        return argv
+
+    def _pipe_stdin(self, stdin):
+        if stdin is None or isinstance(stdin, io.FileIO):
+            return None
+        tsi = self._temp_stdin
+        bufsize = self.bufsize
+        if isinstance(stdin, io.BufferedIOBase):
+            buf = stdin.read(bufsize)
+            while len(buf) != 0:
+                tsi.write(buf)
+                tsi.flush()
+                buf = stdin.read(bufsize)
+        elif isinstance(stdin, (str, bytes)):
+            raw = stdin.encode() if isinstance(stdin, str) else stdin
+            for i in range((len(raw)//bufsize) + 1):
+                tsi.write(raw[i*bufsize:(i + 1)*bufsize])
+                tsi.flush()
+        else:
+            raise ValueError('stdin not understood {0!r}'.format(stdin))
+
+    def _delay_for_pipe(self, env=None, delay=None):
+        # This delay is sometimes needed because the temporary stdin file that
+        # is being written (the pipe) may not even have hit its first flush()
+        # call by the time the spawned process starts up and determines there
+        # is nothing in the file. The spawn can thus exit, without doing any
+        # real work. Consider the case of piping something into grep:
+        #
+        #     $ ps aux | grep root
+        #
+        # grep will exit on EOF and so there is a race between the buffersize
+        # and flushing the temporary file and grep. However, this race is not
+        # always meaningful. Pagers, for example, update when the file is written
+        # to. So what is important is that we start the spawned process ASAP:
+        #
+        #     $ ps aux | less
+        #
+        # So there is a push-and-pull between the competing objectives of
+        # not blocking and letting the spawned process have enough to work with
+        # such that it doesn't exit prematurely. Unfortunately, there is no
+        # way to know a priori how big the file is, how long the spawned process
+        # will run for, etc. Thus a user-definable delay lets the user
+        # find something that works for them.
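+        #
+        # As an illustrative example, running with TEEPTY_PIPE_DELAY=0.05 set
+        # in the environment makes the child sleep for 50 ms below before
+        # exec'ing, giving the pipe time to reach its first flush().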
+ if delay is None: + delay = (env or os.environ).get('TEEPTY_PIPE_DELAY', -1.0) + delay = float(delay) + if 0.0 < delay: + time.sleep(delay) + + +if __name__ == '__main__': + tpty = TeePTY() + tpty.spawn(sys.argv[1:]) + print('-=-'*10) + print(tpty.buffer.getvalue()) + print('-=-'*10) + print(tpty) + print('-=-'*10) + print('Returned with status {0}'.format(tpty.returncode)) diff --git a/xonsh/timings.py b/xonsh/timings.py index 13a0036..98a0249 100644 --- a/xonsh/timings.py +++ b/xonsh/timings.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """Timing related functionality for the xonsh shell. The following time_it alias and Timer was forked from the IPython project: @@ -7,7 +6,6 @@ * Copyright (c) 2001, Janko Hauser * Copyright (c) 2001, Nathaniel Gray """ -import os import gc import sys import math @@ -16,94 +14,49 @@ import builtins import itertools -from xonsh.lazyasd import lazyobject, lazybool -from xonsh.events import events -from xonsh.platform import ON_WINDOWS - - -@lazybool -def _HAVE_RESOURCE(): - try: - import resource as r - - have = True - except ImportError: - # There is no distinction of user/system time under windows, so we - # just use time.perf_counter() for everything... - have = False - return have - - -@lazyobject -def resource(): - import resource as r - - return r - - -@lazyobject -def clocku(): - if _HAVE_RESOURCE: - - def clocku(): - """clocku() -> floating point number - Return the *USER* CPU time in seconds since the start of the - process.""" - return resource.getrusage(resource.RUSAGE_SELF)[0] - - else: - clocku = time.perf_counter - return clocku - - -@lazyobject -def clocks(): - if _HAVE_RESOURCE: - - def clocks(): - """clocks() -> floating point number - Return the *SYSTEM* CPU time in seconds since the start of the - process.""" - return resource.getrusage(resource.RUSAGE_SELF)[1] - - else: - clocks = time.perf_counter - return clocks - - -@lazyobject -def clock(): - if _HAVE_RESOURCE: - - def clock(): - """clock() -> floating point number - Return the *TOTAL USER+SYSTEM* CPU time in seconds since the - start of the process.""" - u, s = resource.getrusage(resource.RUSAGE_SELF)[:2] - return u + s - - else: - clock = time.perf_counter - return clock - - -@lazyobject -def clock2(): - if _HAVE_RESOURCE: - - def clock2(): - """clock2() -> (t_user,t_system) - Similar to clock(), but return a tuple of user/system times.""" - return resource.getrusage(resource.RUSAGE_SELF)[:2] - - else: - - def clock2(): - """Under windows, system CPU time can't be measured. - This just returns perf_counter() and zero.""" - return time.perf_counter(), 0.0 - - return clock2 +try: + import resource + _HAVE_RESOURCE = True +except ImportError: + # There is no distinction of user/system time under windows, so we + # just use time.clock() for everything... + resource = None + _HAVE_RESOURCE = False + +if _HAVE_RESOURCE: + def clocku(): + """clocku() -> floating point number + Return the *USER* CPU time in seconds since the start of the process. + This is done via a call to resource.getrusage, so it avoids the + wraparound problems in time.clock().""" + return resource.getrusage(resource.RUSAGE_SELF)[0] + + def clocks(): + """clocks() -> floating point number + Return the *SYSTEM* CPU time in seconds since the start of the process. 
+ This is done via a call to resource.getrusage, so it avoids the + wraparound problems in time.clock().""" + return resource.getrusage(resource.RUSAGE_SELF)[1] + + def clock(): + """clock() -> floating point number + Return the *TOTAL USER+SYSTEM* CPU time in seconds since the start of + the process. This is done via a call to resource.getrusage, so it + avoids the wraparound problems in time.clock().""" + u, s = resource.getrusage(resource.RUSAGE_SELF)[:2] + return u + s + + def clock2(): + """clock2() -> (t_user,t_system) + Similar to clock(), but return a tuple of user/system times.""" + return resource.getrusage(resource.RUSAGE_SELF)[:2] +else: + clocku = clocks = clock = time.clock + + def clock2(): + """Under windows, system CPU time can't be measured. + This just returns clock() and zero.""" + return time.clock(), 0.0 def format_time(timespan, precision=3): @@ -117,7 +70,7 @@ def format_time(timespan, precision=3): value = int(leftover / length) if value > 0: leftover = leftover % length - time.append("{0}{1}".format(str(value), suffix)) + time.append('{0}{1}'.format(str(value), suffix)) if leftover < 1: break return " ".join(time) @@ -125,12 +78,12 @@ def format_time(timespan, precision=3): # certain terminals. # See bug: https://bugs.launchpad.net/ipython/+bug/348466 # Try to prevent crashes by being more secure than it needs to - # E.g. eclipse is able to print a mu, but has no sys.stdout.encoding set. - units = ["s", "ms", "us", "ns"] # the save value - if hasattr(sys.stdout, "encoding") and sys.stdout.encoding: + # E.g. eclipse is able to print a µ, but has no sys.stdout.encoding set. + units = ["s", "ms", 'us', "ns"] # the save value + if hasattr(sys.stdout, 'encoding') and sys.stdout.encoding: try: - "\xb5".encode(sys.stdout.encoding) - units = ["s", "ms", "\xb5s", "ns"] + '\xb5'.encode(sys.stdout.encoding) + units = ["s", "ms", '\xb5s', "ns"] except Exception: pass scaling = [1, 1e3, 1e6, 1e9] @@ -139,7 +92,8 @@ def format_time(timespan, precision=3): order = min(-int(math.floor(math.log10(timespan)) // 3), 3) else: order = 3 - return "{1:.{0}g} {2}".format(precision, timespan * scaling[order], units[order]) + return "{1:.{0}g} {2}".format(precision, timespan * scaling[order], + units[order]) class Timer(timeit.Timer): @@ -147,7 +101,6 @@ class Timer(timeit.Timer): which is an undocumented implementation detail of CPython, not shared by PyPy. """ - # Timer.timeit copied from CPython 3.4.2 def timeit(self, number=timeit.default_number): """Time 'number' executions of the main statement. @@ -188,22 +141,21 @@ def timeit_alias(args, stdin=None): repeat = 3 precision = 3 # setup - ctx = builtins.__xonsh__.ctx + ctx = builtins.__xonsh_ctx__ timer = Timer(timer=clock) - stmt = " ".join(args) + stmt = ' '.join(args) innerstr = INNER_TEMPLATE.format(stmt=stmt) # Track compilation time so it can be reported if too long # Minimum time above which compilation time will be reported tc_min = 0.1 t0 = clock() - innercode = builtins.compilex( - innerstr, filename="", mode="exec", glbs=ctx - ) + innercode = builtins.compilex(innerstr, filename='', + mode='exec', glbs=ctx) tc = clock() - t0 # get inner func ns = {} - builtins.execx(innercode, glbs=ctx, locs=ns, mode="exec") - timer.inner = ns["inner"] + builtins.execx(innercode, glbs=ctx, locs=ns, mode='exec') + timer.inner = ns['inner'] # Check if there is a huge difference between the best and worst timings. 
worst_tuning = 0 if number == 0: @@ -224,114 +176,15 @@ def timeit_alias(args, stdin=None): worst = max(worst, worst_tuning) # Check best timing is greater than zero to avoid a # ZeroDivisionError. - # In cases where the slowest timing is less than 10 microseconds + # In cases where the slowest timing is lesser than 10 micoseconds # we assume that it does not really matter if the fastest # timing is 4 times faster than the slowest timing or not. if worst > 4 * best and best > 0 and worst > 1e-5: - print( - ( - "The slowest run took {0:0.2f} times longer than the " - "fastest. This could mean that an intermediate result " - "is being cached." - ).format(worst / best) - ) - print( - "{0} loops, best of {1}: {2} per loop".format( - number, repeat, format_time(best, precision) - ) - ) + print(('The slowest run took {0:0.2f} times longer than the ' + 'fastest. This could mean that an intermediate result ' + 'is being cached.').format(worst / best)) + print("{0} loops, best of {1}: {2} per loop" + .format(number, repeat, format_time(best, precision))) if tc > tc_min: print("Compiler time: {0:.2f} s".format(tc)) return - - -_timings = {"start": clock()} - - -def setup_timings(argv): - global _timings - if "--timings" in argv: - events.doc( - "on_timingprobe", - """ - on_timingprobe(name: str) -> None - - Fired to insert some timings into the startuptime list - """, - ) - - @events.on_timingprobe - def timing_on_timingprobe(name, **kw): - global _timings - _timings[name] = clock() - - @events.on_post_cmdloop - def timing_on_post_cmdloop(**kw): - global _timings - _timings["on_post_cmdloop"] = clock() - - @events.on_post_init - def timing_on_post_init(**kw): - global _timings - _timings["on_post_init"] = clock() - - @events.on_post_rc - def timing_on_post_rc(**kw): - global _timings - _timings["on_post_rc"] = clock() - - @events.on_postcommand - def timing_on_postcommand(**kw): - global _timings - _timings["on_postcommand"] = clock() - - @events.on_pre_cmdloop - def timing_on_pre_cmdloop(**kw): - global _timings - _timings["on_pre_cmdloop"] = clock() - - @events.on_pre_rc - def timing_on_pre_rc(**kw): - global _timings - _timings["on_pre_rc"] = clock() - - @events.on_precommand - def timing_on_precommand(**kw): - global _timings - _timings["on_precommand"] = clock() - - @events.on_ptk_create - def timing_on_ptk_create(**kw): - global _timings - _timings["on_ptk_create"] = clock() - - @events.on_chdir - def timing_on_chdir(**kw): - global _timings - _timings["on_chdir"] = clock() - - @events.on_post_prompt - def timing_on_post_prompt(**kw): - global _timings - _timings = {"on_post_prompt": clock()} - - @events.on_pre_prompt - def timing_on_pre_prompt(**kw): - global _timings - _timings["on_pre_prompt"] = clock() - times = list(_timings.items()) - times = sorted(times, key=lambda x: x[1]) - width = max(len(s) for s, _ in times) + 2 - header_format = "|{{:<{}}}|{{:^11}}|{{:^11}}|".format(width) - entry_format = "|{{:<{}}}|{{:^11.3f}}|{{:^11.3f}}|".format(width) - sepline = "|{}|{}|{}|".format("-" * width, "-" * 11, "-" * 11) - # Print result table - print(" Debug level: {}".format(os.getenv("XONSH_DEBUG", "Off"))) - print(sepline) - print(header_format.format("Event name", "Time (s)", "Delta (s)")) - print(sepline) - prevtime = tstart = times[0][1] - for name, ts in times: - print(entry_format.format(name, ts - tstart, ts - prevtime)) - prevtime = ts - print(sepline) diff --git a/xonsh/tokenize.py b/xonsh/tokenize.py deleted file mode 100644 index dc25c6b..0000000 --- a/xonsh/tokenize.py +++ /dev/null 
@@ -1,1210 +0,0 @@ -"""Tokenization help for xonsh programs. - -This file is a modified version of tokenize.py form the Python 3.4 and 3.5 -standard libraries (licensed under the Python Software Foundation License, -version 2), which provides tokenization help for Python programs. - -It is modified to properly tokenize xonsh code, including backtick regex -path and several xonsh-specific operators. - -A few pieces of this file are specific to the version of Python being used. -To find these pieces, search the PY35. - -Original file credits: - __author__ = 'Ka-Ping Yee ' - __credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, ' - 'Skip Montanaro, Raymond Hettinger, Trent Nelson, ' - 'Michael Foord') -""" - -import re -import io -import sys -import codecs -import builtins -import itertools -import collections -import token -from token import ( - AMPER, - AMPEREQUAL, - AT, - CIRCUMFLEX, - CIRCUMFLEXEQUAL, - COLON, - COMMA, - DEDENT, - DOT, - DOUBLESLASH, - DOUBLESLASHEQUAL, - DOUBLESTAR, - DOUBLESTAREQUAL, - ENDMARKER, - EQEQUAL, - EQUAL, - ERRORTOKEN, - GREATER, - GREATEREQUAL, - INDENT, - LBRACE, - LEFTSHIFT, - LEFTSHIFTEQUAL, - LESS, - LESSEQUAL, - LPAR, - LSQB, - MINEQUAL, - MINUS, - NAME, - NEWLINE, - NOTEQUAL, - NUMBER, - N_TOKENS, - OP, - PERCENT, - PERCENTEQUAL, - PLUS, - PLUSEQUAL, - RBRACE, - RIGHTSHIFT, - RIGHTSHIFTEQUAL, - RPAR, - RSQB, - SEMI, - SLASH, - SLASHEQUAL, - STAR, - STAREQUAL, - STRING, - TILDE, - VBAR, - VBAREQUAL, - tok_name, -) - -from xonsh.lazyasd import LazyObject -from xonsh.platform import PYTHON_VERSION_INFO - -cookie_re = LazyObject( - lambda: re.compile(r"^[ \t\f]*#.*coding[:=][ \t]*([-\w.]+)", re.ASCII), - globals(), - "cookie_re", -) -blank_re = LazyObject( - lambda: re.compile(br"^[ \t\f]*(?:[#\r\n]|$)", re.ASCII), globals(), "blank_re" -) - -# -# token modifications -# -tok_name = tok_name.copy() -__all__ = token.__all__ + [ - "COMMENT", - "tokenize", - "detect_encoding", - "NL", - "untokenize", - "ENCODING", - "TokenInfo", - "TokenError", - "SEARCHPATH", - "ATDOLLAR", - "ATEQUAL", - "DOLLARNAME", - "IOREDIRECT", -] -HAS_ASYNC = (3, 5, 0) <= PYTHON_VERSION_INFO < (3, 7, 0) -if HAS_ASYNC: - ASYNC = token.ASYNC - AWAIT = token.AWAIT - ADDSPACE_TOKS = (NAME, NUMBER, ASYNC, AWAIT) -else: - ADDSPACE_TOKS = (NAME, NUMBER) -del token # must clean up token -PY35 = (3, 5, 0) <= PYTHON_VERSION_INFO -AUGASSIGN_OPS = r"[+\-*/%&@|^=<>]=?" 
-if not PY35: - AUGASSIGN_OPS = AUGASSIGN_OPS.replace("@", "") - - -COMMENT = N_TOKENS -tok_name[COMMENT] = "COMMENT" -NL = N_TOKENS + 1 -tok_name[NL] = "NL" -ENCODING = N_TOKENS + 2 -tok_name[ENCODING] = "ENCODING" -N_TOKENS += 3 -SEARCHPATH = N_TOKENS -tok_name[N_TOKENS] = "SEARCHPATH" -N_TOKENS += 1 -IOREDIRECT = N_TOKENS -tok_name[N_TOKENS] = "IOREDIRECT" -N_TOKENS += 1 -DOLLARNAME = N_TOKENS -tok_name[N_TOKENS] = "DOLLARNAME" -N_TOKENS += 1 -ATDOLLAR = N_TOKENS -tok_name[N_TOKENS] = "ATDOLLAR" -N_TOKENS += 1 -ATEQUAL = N_TOKENS -tok_name[N_TOKENS] = "ATEQUAL" -N_TOKENS += 1 -_xonsh_tokens = { - "?": "QUESTION", - "@=": "ATEQUAL", - "@$": "ATDOLLAR", - "||": "DOUBLEPIPE", - "&&": "DOUBLEAMPER", - "@(": "ATLPAREN", - "!(": "BANGLPAREN", - "![": "BANGLBRACKET", - "$(": "DOLLARLPAREN", - "$[": "DOLLARLBRACKET", - "${": "DOLLARLBRACE", - "??": "DOUBLEQUESTION", - "@$(": "ATDOLLARLPAREN", -} - -additional_parenlevs = frozenset({"@(", "!(", "![", "$(", "$[", "${", "@$("}) - -_glbs = globals() -for v in _xonsh_tokens.values(): - _glbs[v] = N_TOKENS - tok_name[N_TOKENS] = v - N_TOKENS += 1 - __all__.append(v) -del _glbs, v - -EXACT_TOKEN_TYPES = { - "(": LPAR, - ")": RPAR, - "[": LSQB, - "]": RSQB, - ":": COLON, - ",": COMMA, - ";": SEMI, - "+": PLUS, - "-": MINUS, - "*": STAR, - "/": SLASH, - "|": VBAR, - "&": AMPER, - "<": LESS, - ">": GREATER, - "=": EQUAL, - ".": DOT, - "%": PERCENT, - "{": LBRACE, - "}": RBRACE, - "==": EQEQUAL, - "!=": NOTEQUAL, - "<=": LESSEQUAL, - ">=": GREATEREQUAL, - "~": TILDE, - "^": CIRCUMFLEX, - "<<": LEFTSHIFT, - ">>": RIGHTSHIFT, - "**": DOUBLESTAR, - "+=": PLUSEQUAL, - "-=": MINEQUAL, - "*=": STAREQUAL, - "/=": SLASHEQUAL, - "%=": PERCENTEQUAL, - "&=": AMPEREQUAL, - "|=": VBAREQUAL, - "^=": CIRCUMFLEXEQUAL, - "<<=": LEFTSHIFTEQUAL, - ">>=": RIGHTSHIFTEQUAL, - "**=": DOUBLESTAREQUAL, - "//": DOUBLESLASH, - "//=": DOUBLESLASHEQUAL, - "@": AT, -} - -EXACT_TOKEN_TYPES.update(_xonsh_tokens) - - -class TokenInfo(collections.namedtuple("TokenInfo", "type string start end line")): - def __repr__(self): - annotated_type = "%d (%s)" % (self.type, tok_name[self.type]) - return ( - "TokenInfo(type=%s, string=%r, start=%r, end=%r, line=%r)" - % self._replace(type=annotated_type) - ) - - @property - def exact_type(self): - if self.type == OP and self.string in EXACT_TOKEN_TYPES: - return EXACT_TOKEN_TYPES[self.string] - else: - return self.type - - -def group(*choices): - return "(" + "|".join(choices) + ")" - - -def tokany(*choices): - return group(*choices) + "*" - - -def maybe(*choices): - return group(*choices) + "?" - - -# Note: we use unicode matching for names ("\w") but ascii matching for -# number literals. -Whitespace = r"[ \f\t]*" -Comment = r"#[^\r\n]*" -Ignore = Whitespace + tokany(r"\\\r?\n" + Whitespace) + maybe(Comment) -Name_RE = r"\$?\w+" - -Hexnumber = r"0[xX](?:_?[0-9a-fA-F])+" -Binnumber = r"0[bB](?:_?[01])+" -Octnumber = r"0[oO](?:_?[0-7])+" -Decnumber = r"(?:0(?:_?0)*|[1-9](?:_?[0-9])*)" -Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber) -Exponent = r"[eE][-+]?[0-9](?:_?[0-9])*" -Pointfloat = group( - r"[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?", r"\.[0-9](?:_?[0-9])*" -) + maybe(Exponent) -Expfloat = r"[0-9](?:_?[0-9])*" + Exponent -Floatnumber = group(Pointfloat, Expfloat) -Imagnumber = group(r"[0-9](?:_?[0-9])*[jJ]", Floatnumber + r"[jJ]") -Number = group(Imagnumber, Floatnumber, Intnumber) - -StringPrefix = r"(?:[bB][rR]?|[p][fFrR]?|[rR][bBpfF]?|[uU]|[fF][rR]?[p]?)?" - -# Tail end of ' string. 
-Single = r"[^'\\]*(?:\\.[^'\\]*)*'" -# Tail end of " string. -Double = r'[^"\\]*(?:\\.[^"\\]*)*"' -# Tail end of ''' string. -Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''" -# Tail end of """ string. -Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""' -Triple = group(StringPrefix + "'''", StringPrefix + '"""') -# Single-line ' or " string. -String = group( - StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'", - StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"', -) - -# Xonsh-specific Syntax -SearchPath = r"((?:[rgp]+|@\w*)?)`([^\n`\\]*(?:\\.[^\n`\\]*)*)`" - -# Because of leftmost-then-longest match semantics, be sure to put the -# longest operators first (e.g., if = came before ==, == would get -# recognized as two instances of =). -_redir_names = ("out", "all", "err", "e", "2", "a", "&", "1", "o") -_redir_map = ( - # stderr to stdout - "err>out", - "err>&1", - "2>out", - "err>o", - "err>1", - "e>out", - "e>&1", - "2>&1", - "e>o", - "2>o", - "e>1", - "2>1", - # stdout to stderr - "out>err", - "out>&2", - "1>err", - "out>e", - "out>2", - "o>err", - "o>&2", - "1>&2", - "o>e", - "1>e", - "o>2", - "1>2", -) -IORedirect = group(group(*_redir_map), "{}>>?".format(group(*_redir_names))) -_redir_check = set(_redir_map) -_redir_check = {"{}>".format(i) for i in _redir_names}.union(_redir_check) -_redir_check = {"{}>>".format(i) for i in _redir_names}.union(_redir_check) -_redir_check = frozenset(_redir_check) -Operator = group( - r"\*\*=?", - r">>=?", - r"<<=?", - r"!=", - r"//=?", - r"->", - r"@\$\(?", - r"\|\|", - "&&", - r"@\(", - r"!\(", - r"!\[", - r"\$\(", - r"\$\[", - r"\${", - r"\?\?", - r"\?", - AUGASSIGN_OPS, - r"~", -) - -Bracket = "[][(){}]" -Special = group(r"\r?\n", r"\.\.\.", r"[:;.,@]") -Funny = group(Operator, Bracket, Special) - -PlainToken = group(IORedirect, Number, Funny, String, Name_RE, SearchPath) - -# First (or only) line of ' or " string. 
-ContStr = group( - StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" + group("'", r"\\\r?\n"), - StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' + group('"', r"\\\r?\n"), -) -PseudoExtras = group(r"\\\r?\n|\Z", Comment, Triple, SearchPath) -PseudoToken = Whitespace + group( - PseudoExtras, IORedirect, Number, Funny, ContStr, Name_RE -) - - -def _compile(expr): - return re.compile(expr, re.UNICODE) - - -endpats = { - "'": Single, - '"': Double, - "'''": Single3, - '"""': Double3, - "r'''": Single3, - 'r"""': Double3, - "b'''": Single3, - 'b"""': Double3, - "f'''": Single3, - 'f"""': Double3, - "R'''": Single3, - 'R"""': Double3, - "B'''": Single3, - 'B"""': Double3, - "F'''": Single3, - 'F"""': Double3, - "br'''": Single3, - 'br"""': Double3, - "fr'''": Single3, - 'fr"""': Double3, - "fp'''": Single3, - 'fp"""': Double3, - "bR'''": Single3, - 'bR"""': Double3, - "Br'''": Single3, - 'Br"""': Double3, - "BR'''": Single3, - 'BR"""': Double3, - "rb'''": Single3, - 'rb"""': Double3, - "rf'''": Single3, - 'rf"""': Double3, - "Rb'''": Single3, - 'Rb"""': Double3, - "Fr'''": Single3, - 'Fr"""': Double3, - "Fp'''": Single3, - 'Fp"""': Double3, - "rB'''": Single3, - 'rB"""': Double3, - "rF'''": Single3, - 'rF"""': Double3, - "RB'''": Single3, - 'RB"""': Double3, - "RF'''": Single3, - 'RF"""': Double3, - "u'''": Single3, - 'u"""': Double3, - "U'''": Single3, - 'U"""': Double3, - "p'''": Single3, - 'p"""': Double3, - "pr'''": Single3, - 'pr"""': Double3, - "pf'''": Single3, - 'pf"""': Double3, - "pF'''": Single3, - 'pF"""': Double3, - "pR'''": Single3, - 'pR"""': Double3, - "rp'''": Single3, - 'rp"""': Double3, - "Rp'''": Single3, - 'Rp"""': Double3, - "r": None, - "R": None, - "b": None, - "B": None, - "u": None, - "U": None, - "p": None, - "f": None, - "F": None, -} - -triple_quoted = {} -for t in ( - "'''", - '"""', - "r'''", - 'r"""', - "R'''", - 'R"""', - "b'''", - 'b"""', - "B'''", - 'B"""', - "f'''", - 'f"""', - "F'''", - 'F"""', - "br'''", - 'br"""', - "Br'''", - 'Br"""', - "bR'''", - 'bR"""', - "BR'''", - 'BR"""', - "rb'''", - 'rb"""', - "rB'''", - 'rB"""', - "Rb'''", - 'Rb"""', - "RB'''", - 'RB"""', - "fr'''", - 'fr"""', - "Fr'''", - 'Fr"""', - "fR'''", - 'fR"""', - "FR'''", - 'FR"""', - "rf'''", - 'rf"""', - "rF'''", - 'rF"""', - "Rf'''", - 'Rf"""', - "RF'''", - 'RF"""', - "u'''", - 'u"""', - "U'''", - 'U"""', - "p'''", - 'p""""', - "pr'''", - 'pr""""', - "pR'''", - 'pR""""', - "rp'''", - 'rp""""', - "Rp'''", - 'Rp""""', - "pf'''", - 'pf""""', - "pF'''", - 'pF""""', - "fp'''", - 'fp""""', - "Fp'''", - 'Fp""""', -): - triple_quoted[t] = t -single_quoted = {} -for t in ( - "'", - '"', - "r'", - 'r"', - "R'", - 'R"', - "b'", - 'b"', - "B'", - 'B"', - "f'", - 'f"', - "F'", - 'F"', - "br'", - 'br"', - "Br'", - 'Br"', - "bR'", - 'bR"', - "BR'", - 'BR"', - "rb'", - 'rb"', - "rB'", - 'rB"', - "Rb'", - 'Rb"', - "RB'", - 'RB"', - "fr'", - 'fr"', - "Fr'", - 'Fr"', - "fR'", - 'fR"', - "FR'", - 'FR"', - "rf'", - 'rf"', - "rF'", - 'rF"', - "Rf'", - 'Rf"', - "RF'", - 'RF"', - "u'", - 'u"', - "U'", - 'U"', - "p'", - 'p"', - "pr'", - 'pr"', - "pR'", - 'pR"', - "rp'", - 'rp"', - "Rp'", - 'Rp"', - "pf'", - 'pf"', - "pF'", - 'pF"', - "fp'", - 'fp"', - "Fp'", - 'Fp"', -): - single_quoted[t] = t - -tabsize = 8 - - -class TokenError(Exception): - pass - - -class StopTokenizing(Exception): - pass - - -class Untokenizer: - def __init__(self): - self.tokens = [] - self.prev_row = 1 - self.prev_col = 0 - self.encoding = None - - def add_whitespace(self, start): - row, col = start - if row < self.prev_row or row == 
self.prev_row and col < self.prev_col: - raise ValueError( - "start ({},{}) precedes previous end ({},{})".format( - row, col, self.prev_row, self.prev_col - ) - ) - row_offset = row - self.prev_row - if row_offset: - self.tokens.append("\\\n" * row_offset) - self.prev_col = 0 - col_offset = col - self.prev_col - if col_offset: - self.tokens.append(" " * col_offset) - - def untokenize(self, iterable): - it = iter(iterable) - indents = [] - startline = False - for t in it: - if len(t) == 2: - self.compat(t, it) - break - tok_type, token, start, end, line = t - if tok_type == ENCODING: - self.encoding = token - continue - if tok_type == ENDMARKER: - break - if tok_type == INDENT: - indents.append(token) - continue - elif tok_type == DEDENT: - indents.pop() - self.prev_row, self.prev_col = end - continue - elif tok_type in (NEWLINE, NL): - startline = True - elif startline and indents: - indent = indents[-1] - if start[1] >= len(indent): - self.tokens.append(indent) - self.prev_col = len(indent) - startline = False - self.add_whitespace(start) - self.tokens.append(token) - self.prev_row, self.prev_col = end - if tok_type in (NEWLINE, NL): - self.prev_row += 1 - self.prev_col = 0 - return "".join(self.tokens) - - def compat(self, token, iterable): - indents = [] - toks_append = self.tokens.append - startline = token[0] in (NEWLINE, NL) - prevstring = False - - for tok in itertools.chain([token], iterable): - toknum, tokval = tok[:2] - if toknum == ENCODING: - self.encoding = tokval - continue - - if toknum in ADDSPACE_TOKS: - tokval += " " - - # Insert a space between two consecutive strings - if toknum == STRING: - if prevstring: - tokval = " " + tokval - prevstring = True - else: - prevstring = False - - if toknum == INDENT: - indents.append(tokval) - continue - elif toknum == DEDENT: - indents.pop() - continue - elif toknum in (NEWLINE, NL): - startline = True - elif startline and indents: - toks_append(indents[-1]) - startline = False - toks_append(tokval) - - -def untokenize(iterable): - """Transform tokens back into Python source code. - It returns a bytes object, encoded using the ENCODING - token, which is the first token sequence output by tokenize. - - Each element returned by the iterable must be a token sequence - with at least two elements, a token number and token value. If - only two tokens are passed, the resulting output is poor. - - Round-trip invariant for full input: - Untokenized source will match input source exactly - - Round-trip invariant for limited intput: - # Output bytes will tokenize the back to the input - t1 = [tok[:2] for tok in tokenize(f.readline)] - newcode = untokenize(t1) - readline = BytesIO(newcode).readline - t2 = [tok[:2] for tok in tokenize(readline)] - assert t1 == t2 - """ - ut = Untokenizer() - out = ut.untokenize(iterable) - if ut.encoding is not None: - out = out.encode(ut.encoding) - return out - - -def _get_normal_name(orig_enc): - """Imitates get_normal_name in tokenizer.c.""" - # Only care about the first 12 characters. - enc = orig_enc[:12].lower().replace("_", "-") - if enc == "utf-8" or enc.startswith("utf-8-"): - return "utf-8" - if enc in ("latin-1", "iso-8859-1", "iso-latin-1") or enc.startswith( - ("latin-1-", "iso-8859-1-", "iso-latin-1-") - ): - return "iso-8859-1" - return orig_enc - - -def detect_encoding(readline): - """ - The detect_encoding() function is used to detect the encoding that should - be used to decode a Python source file. It requires one argument, readline, - in the same way as the tokenize() generator. 
- - It will call readline a maximum of twice, and return the encoding used - (as a string) and a list of any lines (left as bytes) it has read in. - - It detects the encoding from the presence of a utf-8 bom or an encoding - cookie as specified in pep-0263. If both a bom and a cookie are present, - but disagree, a SyntaxError will be raised. If the encoding cookie is an - invalid charset, raise a SyntaxError. Note that if a utf-8 bom is found, - 'utf-8-sig' is returned. - - If no encoding is specified, then the default of 'utf-8' will be returned. - """ - try: - filename = readline.__self__.name - except AttributeError: - filename = None - bom_found = False - encoding = None - default = "utf-8" - - def read_or_stop(): - try: - return readline() - except StopIteration: - return b"" - - def find_cookie(line): - try: - # Decode as UTF-8. Either the line is an encoding declaration, - # in which case it should be pure ASCII, or it must be UTF-8 - # per default encoding. - line_string = line.decode("utf-8") - except UnicodeDecodeError: - msg = "invalid or missing encoding declaration" - if filename is not None: - msg = "{} for {!r}".format(msg, filename) - raise SyntaxError(msg) - - match = cookie_re.match(line_string) - if not match: - return None - encoding = _get_normal_name(match.group(1)) - try: - codecs.lookup(encoding) - except LookupError: - # This behaviour mimics the Python interpreter - if filename is None: - msg = "unknown encoding: " + encoding - else: - msg = "unknown encoding for {!r}: {}".format(filename, encoding) - raise SyntaxError(msg) - - if bom_found: - if encoding != "utf-8": - # This behaviour mimics the Python interpreter - if filename is None: - msg = "encoding problem: utf-8" - else: - msg = "encoding problem for {!r}: utf-8".format(filename) - raise SyntaxError(msg) - encoding += "-sig" - return encoding - - first = read_or_stop() - if first.startswith(codecs.BOM_UTF8): - bom_found = True - first = first[3:] - default = "utf-8-sig" - if not first: - return default, [] - - encoding = find_cookie(first) - if encoding: - return encoding, [first] - if not blank_re.match(first): - return default, [first] - - second = read_or_stop() - if not second: - return default, [first] - - encoding = find_cookie(second) - if encoding: - return encoding, [first, second] - - return default, [first, second] - - -def tokopen(filename): - """Open a file in read only mode using the encoding detected by - detect_encoding(). - """ - buffer = builtins.open(filename, "rb") - try: - encoding, lines = detect_encoding(buffer.readline) - buffer.seek(0) - text = io.TextIOWrapper(buffer, encoding, line_buffering=True) - text.mode = "r" - return text - except Exception: - buffer.close() - raise - - -def _tokenize(readline, encoding): - lnum = parenlev = continued = 0 - numchars = "0123456789" - contstr, needcont = "", 0 - contline = None - indents = [0] - - # 'stashed' and 'async_*' are used for async/await parsing - stashed = None - async_def = False - async_def_indent = 0 - async_def_nl = False - - if encoding is not None: - if encoding == "utf-8-sig": - # BOM will already have been stripped. 
- encoding = "utf-8" - yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), "") - while True: # loop over lines in stream - try: - line = readline() - except StopIteration: - line = b"" - - if encoding is not None: - line = line.decode(encoding) - lnum += 1 - pos, max = 0, len(line) - - if contstr: # continued string - if not line: - raise TokenError("EOF in multi-line string", strstart) - endmatch = endprog.match(line) - if endmatch: - pos = end = endmatch.end(0) - yield TokenInfo( - STRING, contstr + line[:end], strstart, (lnum, end), contline + line - ) - contstr, needcont = "", 0 - contline = None - elif needcont and line[-2:] != "\\\n" and line[-3:] != "\\\r\n": - yield TokenInfo( - ERRORTOKEN, contstr + line, strstart, (lnum, len(line)), contline - ) - contstr = "" - contline = None - continue - else: - contstr = contstr + line - contline = contline + line - continue - - elif parenlev == 0 and not continued: # new statement - if not line: - break - column = 0 - while pos < max: # measure leading whitespace - if line[pos] == " ": - column += 1 - elif line[pos] == "\t": - column = (column // tabsize + 1) * tabsize - elif line[pos] == "\f": - column = 0 - else: - break - pos += 1 - if pos == max: - break - - if line[pos] in "#\r\n": # skip comments or blank lines - if line[pos] == "#": - comment_token = line[pos:].rstrip("\r\n") - nl_pos = pos + len(comment_token) - yield TokenInfo( - COMMENT, - comment_token, - (lnum, pos), - (lnum, pos + len(comment_token)), - line, - ) - yield TokenInfo( - NL, line[nl_pos:], (lnum, nl_pos), (lnum, len(line)), line - ) - else: - yield TokenInfo( - (NL, COMMENT)[line[pos] == "#"], - line[pos:], - (lnum, pos), - (lnum, len(line)), - line, - ) - continue - - if column > indents[-1]: # count indents or dedents - indents.append(column) - yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line) - while column < indents[-1]: - if column not in indents: - raise IndentationError( - "unindent does not match any outer indentation level", - ("", lnum, pos, line), - ) - indents = indents[:-1] - - if async_def and async_def_indent >= indents[-1]: - async_def = False - async_def_nl = False - async_def_indent = 0 - - yield TokenInfo(DEDENT, "", (lnum, pos), (lnum, pos), line) - - if async_def and async_def_nl and async_def_indent >= indents[-1]: - async_def = False - async_def_nl = False - async_def_indent = 0 - - else: # continued statement - if not line: - raise TokenError("EOF in multi-line statement", (lnum, 0)) - continued = 0 - - while pos < max: - pseudomatch = _compile(PseudoToken).match(line, pos) - if pseudomatch: # scan for tokens - start, end = pseudomatch.span(1) - spos, epos, pos = (lnum, start), (lnum, end), end - if start == end: - continue - token, initial = line[start:end], line[start] - - if token in _redir_check: - yield TokenInfo(IOREDIRECT, token, spos, epos, line) - elif initial in numchars or ( # ordinary number - initial == "." and token != "." and token != "..." 
- ): - yield TokenInfo(NUMBER, token, spos, epos, line) - elif initial in "\r\n": - if stashed: - yield stashed - stashed = None - if parenlev > 0: - yield TokenInfo(NL, token, spos, epos, line) - else: - yield TokenInfo(NEWLINE, token, spos, epos, line) - if async_def: - async_def_nl = True - - elif initial == "#": - assert not token.endswith("\n") - if stashed: - yield stashed - stashed = None - yield TokenInfo(COMMENT, token, spos, epos, line) - # Xonsh-specific Regex Globbing - elif re.match(SearchPath, token): - yield TokenInfo(SEARCHPATH, token, spos, epos, line) - elif token in triple_quoted: - endprog = _compile(endpats[token]) - endmatch = endprog.match(line, pos) - if endmatch: # all on one line - pos = endmatch.end(0) - token = line[start:pos] - yield TokenInfo(STRING, token, spos, (lnum, pos), line) - else: - strstart = (lnum, start) # multiple lines - contstr = line[start:] - contline = line - break - elif ( - initial in single_quoted - or token[:2] in single_quoted - or token[:3] in single_quoted - ): - if token[-1] == "\n": # continued string - strstart = (lnum, start) - endprog = _compile( - endpats[initial] or endpats[token[1]] or endpats[token[2]] - ) - contstr, needcont = line[start:], 1 - contline = line - break - else: # ordinary string - yield TokenInfo(STRING, token, spos, epos, line) - elif token.startswith("$") and token[1:].isidentifier(): - yield TokenInfo(DOLLARNAME, token, spos, epos, line) - elif initial.isidentifier(): # ordinary name - if token in ("async", "await"): - if async_def: - yield TokenInfo( - ASYNC if token == "async" else AWAIT, - token, - spos, - epos, - line, - ) - continue - - tok = TokenInfo(NAME, token, spos, epos, line) - if token == "async" and not stashed: - stashed = tok - continue - - if ( - HAS_ASYNC - and token == "def" - and ( - stashed - and stashed.type == NAME - and stashed.string == "async" - ) - ): - async_def = True - async_def_indent = indents[-1] - - yield TokenInfo( - ASYNC, - stashed.string, - stashed.start, - stashed.end, - stashed.line, - ) - stashed = None - - if stashed: - yield stashed - stashed = None - - yield tok - elif token == "\\\n" or token == "\\\r\n": # continued stmt - continued = 1 - yield TokenInfo(ERRORTOKEN, token, spos, epos, line) - elif initial == "\\": # continued stmt - # for cases like C:\\path\\to\\file - continued = 1 - else: - if initial in "([{": - parenlev += 1 - elif initial in ")]}": - parenlev -= 1 - elif token in additional_parenlevs: - parenlev += 1 - if stashed: - yield stashed - stashed = None - yield TokenInfo(OP, token, spos, epos, line) - else: - yield TokenInfo( - ERRORTOKEN, line[pos], (lnum, pos), (lnum, pos + 1), line - ) - pos += 1 - - if stashed: - yield stashed - stashed = None - - for indent in indents[1:]: # pop remaining indent levels - yield TokenInfo(DEDENT, "", (lnum, 0), (lnum, 0), "") - yield TokenInfo(ENDMARKER, "", (lnum, 0), (lnum, 0), "") - - -def tokenize(readline): - """ - The tokenize() generator requires one argument, readline, which - must be a callable object which provides the same interface as the - readline() method of built-in file objects. Each call to the function - should return one line of input as bytes. 
Alternately, readline - can be a callable function terminating with StopIteration: - readline = open(myfile, 'rb').__next__ # Example of alternate readline - - The generator produces 5-tuples with these members: the token type; the - token string; a 2-tuple (srow, scol) of ints specifying the row and - column where the token begins in the source; a 2-tuple (erow, ecol) of - ints specifying the row and column where the token ends in the source; - and the line on which the token was found. The line passed is the - logical line; continuation lines are included. - - The first token sequence will always be an ENCODING token - which tells you which encoding was used to decode the bytes stream. - """ - encoding, consumed = detect_encoding(readline) - rl_gen = iter(readline, b"") - empty = itertools.repeat(b"") - return _tokenize(itertools.chain(consumed, rl_gen, empty).__next__, encoding) - - -# An undocumented, backwards compatible, API for all the places in the standard -# library that expect to be able to use tokenize with strings -def generate_tokens(readline): - return _tokenize(readline, None) - - -def tokenize_main(): - import argparse - - # Helper error handling routines - def perror(message): - print(message, file=sys.stderr) - - def error(message, filename=None, location=None): - if location: - args = (filename,) + location + (message,) - perror("%s:%d:%d: error: %s" % args) - elif filename: - perror("%s: error: %s" % (filename, message)) - else: - perror("error: %s" % message) - sys.exit(1) - - # Parse the arguments and options - parser = argparse.ArgumentParser(prog="python -m tokenize") - parser.add_argument( - dest="filename", - nargs="?", - metavar="filename.py", - help="the file to tokenize; defaults to stdin", - ) - parser.add_argument( - "-e", - "--exact", - dest="exact", - action="store_true", - help="display token names using the exact type", - ) - args = parser.parse_args() - - try: - # Tokenize the input - if args.filename: - filename = args.filename - with builtins.open(filename, "rb") as f: - tokens = list(tokenize(f.readline)) - else: - filename = "" - tokens = _tokenize(sys.stdin.readline, None) - - # Output the tokenization - for token in tokens: - token_type = token.type - if args.exact: - token_type = token.exact_type - token_range = "%d,%d-%d,%d:" % (token.start + token.end) - print("%-20s%-15s%-15r" % (token_range, tok_name[token_type], token.string)) - except IndentationError as err: - line, column = err.args[1][1:3] - error(err.args[0], filename, (line, column)) - except TokenError as err: - line, column = err.args[1] - error(err.args[0], filename, (line, column)) - except SyntaxError as err: - error(err, filename) - except OSError as err: - error(err) - except KeyboardInterrupt: - print("interrupted\n") - except Exception as err: - perror("unexpected error: %s" % err) - raise diff --git a/xonsh/tools.py b/xonsh/tools.py index 98e3531..4263fc0 100644 --- a/xonsh/tools.py +++ b/xonsh/tools.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- """Misc. xonsh tools. 
The following implementations were forked from the IPython project: @@ -17,615 +16,129 @@ * indent() """ -import builtins -import collections -import collections.abc as cabc -import contextlib -import ctypes -import datetime -from distutils.version import LooseVersion -import functools -import glob -import itertools import os -import pathlib import re -import subprocess import sys -import threading +import builtins +import platform import traceback -import warnings -import operator -import ast -import string - -# adding imports from further xonsh modules is discouraged to avoid circular -# dependencies -from xonsh import __version__ -from xonsh.lazyasd import LazyObject, LazyDict, lazyobject -from xonsh.platform import ( - scandir, - DEFAULT_ENCODING, - ON_LINUX, - ON_WINDOWS, - PYTHON_VERSION_INFO, - expanduser, - os_environ, -) - - -@functools.lru_cache(1) -def is_superuser(): - if ON_WINDOWS: - rtn = ctypes.windll.shell32.IsUserAnAdmin() != 0 - else: - rtn = os.getuid() == 0 - return rtn +import threading +import subprocess +from itertools import zip_longest +from contextlib import contextmanager +from collections import OrderedDict, Sequence + + +if sys.version_info[0] >= 3: + string_types = (str, bytes) + unicode_type = str +else: + string_types = (str, unicode) + unicode_type = unicode + +DEFAULT_ENCODING = sys.getdefaultencoding() + +ON_WINDOWS = (platform.system() == 'Windows') +ON_MAC = (platform.system() == 'Darwin') +ON_LINUX = (platform.system() == 'Linux') +ON_ARCH = (platform.linux_distribution()[0] == 'arch') +ON_POSIX = (os.name == 'posix') + +VER_3_4 = (3, 4) +VER_3_5 = (3, 5) +VER_MAJOR_MINOR = sys.version_info[:2] +V_MAJOR_MINOR = 'v{0}{1}'.format(*sys.version_info[:2]) + +def docstring_by_version(**kwargs): + """Sets a docstring by the python version.""" + doc = kwargs.get(V_MAJOR_MINOR, None) + if V_MAJOR_MINOR is None: + raise RuntimeError('unrecognized version ' + V_MAJOR_MINOR) + def dec(f): + f.__doc__ = doc + return f + return dec class XonshError(Exception): pass -class XonshCalledProcessError(XonshError, subprocess.CalledProcessError): - """Raised when there's an error with a called process - - Inherits from XonshError and subprocess.CalledProcessError, catching - either will also catch this error. - - Raised *after* iterating over stdout of a captured command, if the - returncode of the command is nonzero. - - Example: - try: - for line in !(ls): - print(line) - except subprocess.CalledProcessError as error: - print("Error in process: {}.format(error.completed_command.pid)) - - This also handles differences between Python3.4 and 3.5 where - CalledProcessError is concerned. - """ - - def __init__( - self, returncode, command, output=None, stderr=None, completed_command=None - ): - super().__init__(returncode, command, output) - self.stderr = stderr - self.completed_command = completed_command - - -def expand_path(s, expand_user=True): - """Takes a string path and expands ~ to home if expand_user is set - and environment vars if EXPAND_ENV_VARS is set.""" - session = getattr(builtins, "__xonsh__", None) - env = os_environ if session is None else getattr(session, "env", os_environ) - if env.get("EXPAND_ENV_VARS", False): - s = expandvars(s) - if expand_user: - # expand ~ according to Bash unquoted rules "Each variable assignment is - # checked for unquoted tilde-prefixes immediately following a ':' or the - # first '='". See the following for more details. 
- # https://www.gnu.org/software/bash/manual/html_node/Tilde-Expansion.html - pre, char, post = s.partition("=") - if char: - s = expanduser(pre) + char - s += os.pathsep.join(map(expanduser, post.split(os.pathsep))) - else: - s = expanduser(s) - return s - - -def _expandpath(path): - """Performs environment variable / user expansion on a given path - if EXPAND_ENV_VARS is set. - """ - session = getattr(builtins, "__xonsh__", None) - env = os_environ if session is None else getattr(session, "env", os_environ) - expand_user = env.get("EXPAND_ENV_VARS", False) - return expand_path(path, expand_user=expand_user) - - -def decode_bytes(b): - """Tries to decode the bytes using XONSH_ENCODING if available, - otherwise using sys.getdefaultencoding(). - """ - session = getattr(builtins, "__xonsh__", None) - env = os_environ if session is None else getattr(session, "env", os_environ) - enc = env.get("XONSH_ENCODING") or DEFAULT_ENCODING - err = env.get("XONSH_ENCODING_ERRORS") or "strict" - return b.decode(encoding=enc, errors=err) - - -def findfirst(s, substrs): - """Finds whichever of the given substrings occurs first in the given string - and returns that substring, or returns None if no such strings occur. - """ - i = len(s) - result = None - for substr in substrs: - pos = s.find(substr) - if -1 < pos < i: - i = pos - result = substr - return i, result - - -class EnvPath(cabc.MutableSequence): - """A class that implements an environment path, which is a list of - strings. Provides a custom method that expands all paths if the - relevant env variable has been set. - """ - - def __init__(self, args=None): - if not args: - self._l = [] - else: - if isinstance(args, str): - self._l = args.split(os.pathsep) - elif isinstance(args, pathlib.Path): - self._l = [args] - elif isinstance(args, bytes): - # decode bytes to a string and then split based on - # the default path separator - self._l = decode_bytes(args).split(os.pathsep) - elif isinstance(args, cabc.Iterable): - # put everything in a list -before- performing the type check - # in order to be able to retrieve it later, for cases such as - # when a generator expression was passed as an argument - args = list(args) - if not all(isinstance(i, (str, bytes, pathlib.Path)) for i in args): - # make TypeError's message as informative as possible - # when given an invalid initialization sequence - raise TypeError( - "EnvPath's initialization sequence should only " - "contain str, bytes and pathlib.Path entries" - ) - self._l = args - else: - raise TypeError( - "EnvPath cannot be initialized with items " - "of type %s" % type(args) - ) - - def __getitem__(self, item): - # handle slices separately - if isinstance(item, slice): - return [_expandpath(i) for i in self._l[item]] - else: - return _expandpath(self._l[item]) - - def __setitem__(self, index, item): - self._l.__setitem__(index, item) - - def __len__(self): - return len(self._l) - - def __delitem__(self, key): - self._l.__delitem__(key) - - def insert(self, index, value): - self._l.insert(index, value) - - @property - def paths(self): - """ - Returns the list of directories that this EnvPath contains. 
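For reference, a minimal usage sketch of the EnvPath class removed above, assuming the class is importable and a xonsh session is active (entries are stored raw and expanded lazily, on item access, when $EXPAND_ENV_VARS is set):

    import os

    p = EnvPath("~/bin" + os.pathsep + "/usr/local/bin")  # a string is split on os.pathsep
    p.add("/opt/bin", front=True)                         # prepend a new entry
    p.add("/usr/local/bin", front=True, replace=True)     # move an existing entry to the front
    print(p[:])                                           # slicing expands each entry on access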
- """ - return list(self) - - def __repr__(self): - return repr(self._l) - - def __eq__(self, other): - if len(self) != len(other): - return False - return all(map(operator.eq, self, other)) - - def _repr_pretty_(self, p, cycle): - """ Pretty print path list """ - if cycle: - p.text("EnvPath(...)") - else: - with p.group(1, "EnvPath(\n[", "]\n)"): - for idx, item in enumerate(self): - if idx: - p.text(",") - p.breakable() - p.pretty(item) - - def __add__(self, other): - if isinstance(other, EnvPath): - other = other._l - return EnvPath(self._l + other) - - def __radd__(self, other): - if isinstance(other, EnvPath): - other = other._l - return EnvPath(other + self._l) - - def add(self, data, front=False, replace=False): - """Add a value to this EnvPath, - - path.add(data, front=bool, replace=bool) -> ensures that path contains data, with position determined by kwargs - - Parameters - ---------- - data : string or bytes or pathlib.Path - value to be added - front : bool - whether the value should be added to the front, will be - ignored if the data already exists in this EnvPath and - replace is False - Default : False - replace : bool - If True, the value will be removed and added to the - start or end(depending on the value of front) - Default : False - - Returns - ------- - None - - """ - if data not in self._l: - self._l.insert(0 if front else len(self._l), data) - elif replace: - self._l.remove(data) - self._l.insert(0 if front else len(self._l), data) - - -@lazyobject -def FORMATTER(): - return string.Formatter() - - -class DefaultNotGivenType(object): - """Singleton for representing when no default value is given.""" - - __inst = None - - def __new__(cls): - if DefaultNotGivenType.__inst is None: - DefaultNotGivenType.__inst = object.__new__(cls) - return DefaultNotGivenType.__inst - - -DefaultNotGiven = DefaultNotGivenType() - -BEG_TOK_SKIPS = LazyObject( - lambda: frozenset(["WS", "INDENT", "NOT", "LPAREN"]), globals(), "BEG_TOK_SKIPS" -) -END_TOK_TYPES = LazyObject( - lambda: frozenset(["SEMI", "AND", "OR", "RPAREN"]), globals(), "END_TOK_TYPES" -) -RE_END_TOKS = LazyObject( - lambda: re.compile(r"(;|and|\&\&|or|\|\||\))"), globals(), "RE_END_TOKS" -) -LPARENS = LazyObject( - lambda: frozenset( - ["LPAREN", "AT_LPAREN", "BANG_LPAREN", "DOLLAR_LPAREN", "ATDOLLAR_LPAREN"] - ), - globals(), - "LPARENS", -) - - -def _is_not_lparen_and_rparen(lparens, rtok): - """Tests if an RPAREN token is matched with something other than a plain old - LPAREN type. - """ - # note that any([]) is False, so this covers len(lparens) == 0 - return rtok.type == "RPAREN" and any(x != "LPAREN" for x in lparens) - - -def balanced_parens(line, mincol=0, maxcol=None, lexer=None): - """Determines if parentheses are balanced in an expression.""" - line = line[mincol:maxcol] - if lexer is None: - lexer = builtins.__xonsh__.execer.parser.lexer - if "(" not in line and ")" not in line: - return True - cnt = 0 - lexer.input(line) - for tok in lexer: - if tok.type in LPARENS: - cnt += 1 - elif tok.type == "RPAREN": - cnt -= 1 - elif tok.type == "ERRORTOKEN" and ")" in tok.value: - cnt -= 1 - return cnt == 0 - - -def find_next_break(line, mincol=0, lexer=None): - """Returns the column number of the next logical break in subproc mode. - This function may be useful in finding the maxcol argument of - subproc_toks(). 
- """ - if mincol >= 1: - line = line[mincol:] - if lexer is None: - lexer = builtins.__xonsh__.execer.parser.lexer - if RE_END_TOKS.search(line) is None: - return None - maxcol = None - lparens = [] - lexer.input(line) - for tok in lexer: - if tok.type in LPARENS: - lparens.append(tok.type) - elif tok.type in END_TOK_TYPES: - if _is_not_lparen_and_rparen(lparens, tok): - lparens.pop() - else: - maxcol = tok.lexpos + mincol + 1 - break - elif tok.type == "ERRORTOKEN" and ")" in tok.value: - maxcol = tok.lexpos + mincol + 1 - break - elif tok.type == "BANG": - maxcol = mincol + len(line) + 1 - break - return maxcol - - -def _offset_from_prev_lines(line, last): - lines = line.splitlines(keepends=True)[:last] - return sum(map(len, lines)) - - -def subproc_toks( - line, mincol=-1, maxcol=None, lexer=None, returnline=False, greedy=False -): - """Encapsulates tokens in a source code line in a uncaptured - subprocess ![] starting at a minimum column. If there are no tokens - (ie in a comment line) this returns None. If greedy is True, it will encapsulate - normal parentheses. Greedy is False by default. +def subproc_toks(line, mincol=-1, maxcol=None, lexer=None, returnline=False): + """Excapsulates tokens in a source code line in a uncaptured + subprocess $[] starting at a minimum column. If there are no tokens + (ie in a comment line) this returns None. """ if lexer is None: - lexer = builtins.__xonsh__.execer.parser.lexer + lexer = builtins.__xonsh_execer__.parser.lexer if maxcol is None: maxcol = len(line) + 1 lexer.reset() lexer.input(line) toks = [] - lparens = [] - saw_macro = False end_offset = 0 for tok in lexer: pos = tok.lexpos - if tok.type not in END_TOK_TYPES and pos >= maxcol: + if tok.type != 'SEMI' and pos >= maxcol: break - if tok.type == "BANG": - saw_macro = True - if saw_macro and tok.type not in ("NEWLINE", "DEDENT"): - toks.append(tok) - continue - if tok.type in LPARENS: - lparens.append(tok.type) - if greedy and len(lparens) > 0 and "LPAREN" in lparens: - toks.append(tok) - if tok.type == "RPAREN": - lparens.pop() - continue - if len(toks) == 0 and tok.type in BEG_TOK_SKIPS: + if len(toks) == 0 and tok.type in ('WS', 'INDENT'): continue # handle indentation - elif len(toks) > 0 and toks[-1].type in END_TOK_TYPES: - if _is_not_lparen_and_rparen(lparens, toks[-1]): - lparens.pop() # don't continue or break - elif pos < maxcol and tok.type not in ("NEWLINE", "DEDENT", "WS"): - if not greedy: - toks.clear() - if tok.type in BEG_TOK_SKIPS: - continue + elif len(toks) > 0 and toks[-1].type == 'SEMI': + if pos < maxcol and tok.type not in ('NEWLINE', 'DEDENT', 'WS'): + toks.clear() else: break if pos < mincol: continue toks.append(tok) - if tok.type == "WS" and tok.value == "\\": - pass # line continuation - elif tok.type == "NEWLINE": + if tok.type == 'NEWLINE': break - elif tok.type == "DEDENT": + elif tok.type == 'DEDENT': # fake a newline when dedenting without a newline - tok.type = "NEWLINE" - tok.value = "\n" + tok.type = 'NEWLINE' + tok.value = '\n' tok.lineno -= 1 - if len(toks) >= 2: - prev_tok_end = toks[-2].lexpos + len(toks[-2].value) - else: - prev_tok_end = len(line) - if "#" in line[prev_tok_end:]: - tok.lexpos = prev_tok_end # prevents wrapping comments - else: - tok.lexpos = len(line) + tok.lexpos = len(line) break - elif check_bad_str_token(tok): - return else: - if len(toks) > 0 and toks[-1].type in END_TOK_TYPES: - if _is_not_lparen_and_rparen(lparens, toks[-1]): - pass - elif greedy and toks[-1].type == "RPAREN": - pass - else: - toks.pop() if len(toks) == 
             return  # handle comment lines
+        if toks[-1].type == 'SEMI':
+            toks.pop()
         tok = toks[-1]
         pos = tok.lexpos
-        if isinstance(tok.value, str):
-            end_offset = len(tok.value.rstrip())
+        if isinstance(tok.value, string_types):
+            end_offset = len(tok.value)
         else:
-            el = line[pos:].split("#")[0].rstrip()
+            el = line[pos:].split('#')[0].rstrip()
             end_offset = len(el)
     if len(toks) == 0:
         return  # handle comment lines
-    elif saw_macro or greedy:
-        end_offset = len(toks[-1].value.rstrip()) + 1
-    if toks[0].lineno != toks[-1].lineno:
-        # handle multiline cases
-        end_offset += _offset_from_prev_lines(line, toks[-1].lineno)
     beg, end = toks[0].lexpos, (toks[-1].lexpos + end_offset)
-    end = len(line[:end].rstrip())
-    rtn = "![" + line[beg:end] + "]"
+    rtn = '$[' + line[beg:end] + ']'
     if returnline:
         rtn = line[:beg] + rtn + line[end:]
     return rtn


-def check_bad_str_token(tok):
-    """Checks if a token is a bad string."""
-    if tok.type == "ERRORTOKEN" and tok.value == "EOF in multi-line string":
-        return True
-    elif isinstance(tok.value, str) and not check_quotes(tok.value):
-        return True
-    else:
-        return False
-
-
-def check_quotes(s):
-    """Checks a string to make sure that if it starts with quotes, it also
-    ends with quotes.
-    """
-    starts_as_str = RE_BEGIN_STRING.match(s) is not None
-    ends_as_str = s.endswith('"') or s.endswith("'")
-    if not starts_as_str and not ends_as_str:
-        ok = True
-    elif starts_as_str and not ends_as_str:
-        ok = False
-    elif not starts_as_str and ends_as_str:
-        ok = False
-    else:
-        m = RE_COMPLETE_STRING.match(s)
-        ok = m is not None
-    return ok
-
-
-def _have_open_triple_quotes(s):
-    if s.count('"""') % 2 == 1:
-        open_triple = '"""'
-    elif s.count("'''") % 2 == 1:
-        open_triple = "'''"
-    else:
-        open_triple = False
-    return open_triple
-
-
-def get_line_continuation():
-    """ The line continuation characters used in subproc mode. In interactive
-    mode on Windows the backslash must be preceded by a space. This is because
-    paths on Windows may end in a backslash.
-    """
-    if (
-        ON_WINDOWS
-        and hasattr(builtins.__xonsh__, "env")
-        and builtins.__xonsh__.env.get("XONSH_INTERACTIVE", False)
-    ):
-        return " \\"
-    else:
-        return "\\"
-
-
-def get_logical_line(lines, idx):
-    """Returns a single logical line (i.e. one without line continuations)
-    from a list of lines. This line should begin at index idx. This also
-    returns the number of physical lines the logical line spans. The lines
-    should not contain newlines
-    """
-    n = 1
-    nlines = len(lines)
-    linecont = get_line_continuation()
-    while idx > 0 and lines[idx - 1].endswith(linecont):
-        idx -= 1
-    start = idx
-    line = lines[idx]
-    open_triple = _have_open_triple_quotes(line)
-    while (line.endswith(linecont) or open_triple) and idx < nlines - 1:
-        n += 1
-        idx += 1
-        if line.endswith(linecont):
-            line = line[:-1] + lines[idx]
-        else:
-            line = line + "\n" + lines[idx]
-        open_triple = _have_open_triple_quotes(line)
-    return line, n, start
-
-
-def replace_logical_line(lines, logical, idx, n):
-    """Replaces lines at idx that may end in line continuation with a logical
-    line that spans n lines.
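A minimal sketch of the logical-line helper above, assuming a POSIX session so the continuation string is a plain backslash:

    lines = ["echo a \\", "     b", "echo c"]
    logical, n, start = get_logical_line(lines, 0)
    # -> ("echo a      b", 2, 0): two physical lines joined into one logical line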
- """ - linecont = get_line_continuation() - if n == 1: - lines[idx] = logical - return - space = " " - for i in range(idx, idx + n - 1): - a = len(lines[i]) - b = logical.find(space, a - 1) - if b < 0: - # no space found - lines[i] = logical - logical = "" - else: - # found space to split on - lines[i] = logical[:b] + linecont - logical = logical[b:] - lines[idx + n - 1] = logical - - -def is_balanced(expr, ltok, rtok): - """Determines whether an expression has unbalanced opening and closing tokens.""" - lcnt = expr.count(ltok) - if lcnt == 0: - return True - rcnt = expr.count(rtok) - if lcnt == rcnt: - return True - else: - return False - - def subexpr_from_unbalanced(expr, ltok, rtok): """Attempts to pull out a valid subexpression for unbalanced grouping, based on opening tokens, eg. '(', and closing tokens, eg. ')'. This does not do full tokenization, but should be good enough for tab completion. """ - if is_balanced(expr, ltok, rtok): + lcnt = expr.count(ltok) + if lcnt == 0: + return expr + rcnt = expr.count(rtok) + if lcnt == rcnt: return expr subexpr = expr.rsplit(ltok, 1)[-1] - subexpr = subexpr.rsplit(",", 1)[-1] - subexpr = subexpr.rsplit(":", 1)[-1] - return subexpr - - -def subexpr_before_unbalanced(expr, ltok, rtok): - """Obtains the expression prior to last unbalanced left token.""" - subexpr, _, post = expr.rpartition(ltok) - nrtoks_in_post = post.count(rtok) - while nrtoks_in_post != 0: - for i in range(nrtoks_in_post): - subexpr, _, post = subexpr.rpartition(ltok) - nrtoks_in_post = post.count(rtok) - _, _, subexpr = subexpr.rpartition(rtok) - _, _, subexpr = subexpr.rpartition(ltok) + subexpr = subexpr.rsplit(',', 1)[-1] + subexpr = subexpr.rsplit(':', 1)[-1] return subexpr -@lazyobject -def STARTING_WHITESPACE_RE(): - return re.compile(r"^(\s*)") - - -def starting_whitespace(s): - """Returns the whitespace at the start of a string""" - return STARTING_WHITESPACE_RE.match(s).group(1) - - def decode(s, encoding=None): encoding = encoding or DEFAULT_ENCODING return s.decode(encoding, "replace") @@ -678,36 +191,92 @@ def indent(instr, nspaces=4, ntabs=0, flatten=False): """ if instr is None: return - ind = "\t" * ntabs + " " * nspaces + ind = '\t' * ntabs + ' ' * nspaces if flatten: - pat = re.compile(r"^\s*", re.MULTILINE) + pat = re.compile(r'^\s*', re.MULTILINE) else: - pat = re.compile(r"^", re.MULTILINE) + pat = re.compile(r'^', re.MULTILINE) outstr = re.sub(pat, ind, instr) if outstr.endswith(os.linesep + ind): - return outstr[: -len(ind)] + return outstr[:-len(ind)] else: return outstr -def get_sep(): - """ Returns the appropriate filepath separator char depending on OS and - xonsh options set - """ - if ON_WINDOWS and builtins.__xonsh__.env.get("FORCE_POSIX_PATHS"): - return os.altsep - else: - return os.sep +TERM_COLORS = { + # Reset + 'NO_COLOR': '\001\033[0m\002', # Text Reset + # Regular Colors + 'BLACK': '\001\033[0;30m\002', # BLACK + 'RED': '\001\033[0;31m\002', # RED + 'GREEN': '\001\033[0;32m\002', # GREEN + 'YELLOW': '\001\033[0;33m\002', # YELLOW + 'BLUE': '\001\033[0;34m\002', # BLUE + 'PURPLE': '\001\033[0;35m\002', # PURPLE + 'CYAN': '\001\033[0;36m\002', # CYAN + 'WHITE': '\001\033[0;37m\002', # WHITE + # Bold + 'BOLD_BLACK': '\001\033[1;30m\002', # BLACK + 'BOLD_RED': '\001\033[1;31m\002', # RED + 'BOLD_GREEN': '\001\033[1;32m\002', # GREEN + 'BOLD_YELLOW': '\001\033[1;33m\002', # YELLOW + 'BOLD_BLUE': '\001\033[1;34m\002', # BLUE + 'BOLD_PURPLE': '\001\033[1;35m\002', # PURPLE + 'BOLD_CYAN': '\001\033[1;36m\002', # CYAN + 'BOLD_WHITE': 
'\001\033[1;37m\002', # WHITE + # Underline + 'UNDERLINE_BLACK': '\001\033[4;30m\002', # BLACK + 'UNDERLINE_RED': '\001\033[4;31m\002', # RED + 'UNDERLINE_GREEN': '\001\033[4;32m\002', # GREEN + 'UNDERLINE_YELLOW': '\001\033[4;33m\002', # YELLOW + 'UNDERLINE_BLUE': '\001\033[4;34m\002', # BLUE + 'UNDERLINE_PURPLE': '\001\033[4;35m\002', # PURPLE + 'UNDERLINE_CYAN': '\001\033[4;36m\002', # CYAN + 'UNDERLINE_WHITE': '\001\033[4;37m\002', # WHITE + # Background + 'BACKGROUND_BLACK': '\001\033[40m\002', # BLACK + 'BACKGROUND_RED': '\001\033[41m\002', # RED + 'BACKGROUND_GREEN': '\001\033[42m\002', # GREEN + 'BACKGROUND_YELLOW': '\001\033[43m\002', # YELLOW + 'BACKGROUND_BLUE': '\001\033[44m\002', # BLUE + 'BACKGROUND_PURPLE': '\001\033[45m\002', # PURPLE + 'BACKGROUND_CYAN': '\001\033[46m\002', # CYAN + 'BACKGROUND_WHITE': '\001\033[47m\002', # WHITE + # High Intensity + 'INTENSE_BLACK': '\001\033[0;90m\002', # BLACK + 'INTENSE_RED': '\001\033[0;91m\002', # RED + 'INTENSE_GREEN': '\001\033[0;92m\002', # GREEN + 'INTENSE_YELLOW': '\001\033[0;93m\002', # YELLOW + 'INTENSE_BLUE': '\001\033[0;94m\002', # BLUE + 'INTENSE_PURPLE': '\001\033[0;95m\002', # PURPLE + 'INTENSE_CYAN': '\001\033[0;96m\002', # CYAN + 'INTENSE_WHITE': '\001\033[0;97m\002', # WHITE + # Bold High Intensity + 'BOLD_INTENSE_BLACK': '\001\033[1;90m\002', # BLACK + 'BOLD_INTENSE_RED': '\001\033[1;91m\002', # RED + 'BOLD_INTENSE_GREEN': '\001\033[1;92m\002', # GREEN + 'BOLD_INTENSE_YELLOW': '\001\033[1;93m\002', # YELLOW + 'BOLD_INTENSE_BLUE': '\001\033[1;94m\002', # BLUE + 'BOLD_INTENSE_PURPLE': '\001\033[1;95m\002', # PURPLE + 'BOLD_INTENSE_CYAN': '\001\033[1;96m\002', # CYAN + 'BOLD_INTENSE_WHITE': '\001\033[1;97m\002', # WHITE + # High Intensity backgrounds + 'BACKGROUND_INTENSE_BLACK': '\001\033[0;100m\002', # BLACK + 'BACKGROUND_INTENSE_RED': '\001\033[0;101m\002', # RED + 'BACKGROUND_INTENSE_GREEN': '\001\033[0;102m\002', # GREEN + 'BACKGROUND_INTENSE_YELLOW': '\001\033[0;103m\002', # YELLOW + 'BACKGROUND_INTENSE_BLUE': '\001\033[0;104m\002', # BLUE + 'BACKGROUND_INTENSE_PURPLE': '\001\033[0;105m\002', # PURPLE + 'BACKGROUND_INTENSE_CYAN': '\001\033[0;106m\002', # CYAN + 'BACKGROUND_INTENSE_WHITE': '\001\033[0;107m\002', # WHITE +} def fallback(cond, backup): - """Decorator for returning the object if cond is true and a backup if cond - is false. + """Decorator for returning the object if cond is true and a backup if cond is false. """ - def dec(obj): return obj if cond else backup - return dec @@ -748,234 +317,93 @@ class redirect_stdout(_RedirectStream): Mostly for backwards compatibility. 
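The class here mirrors contextlib.redirect_stdout; a quick sketch of the intended use, assuming the module is importable:

    import io

    buf = io.StringIO()
    with redirect_stdout(buf):
        print("captured")  # written to buf instead of the real stdout
    assert buf.getvalue() == "captured\n"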
""" - _stream = "stdout" class redirect_stderr(_RedirectStream): """Context manager for temporarily redirecting stderr to another file.""" - _stream = "stderr" -def _yield_accessible_unix_file_names(path): - """yield file names of executable files in path.""" - if not os.path.exists(path): - return - for file_ in scandir(path): - try: - if file_.is_file() and os.access(file_.path, os.X_OK): - yield file_.name - except OSError: - # broken Symlink are neither dir not files - pass - - -def _executables_in_posix(path): - if not os.path.exists(path): - return - elif PYTHON_VERSION_INFO < (3, 5, 0): - for fname in os.listdir(path): - fpath = os.path.join(path, fname) - if ( - os.path.exists(fpath) - and os.access(fpath, os.X_OK) - and (not os.path.isdir(fpath)) - ): - yield fname - else: - yield from _yield_accessible_unix_file_names(path) - - -def _executables_in_windows(path): - if not os.path.isdir(path): - return - extensions = builtins.__xonsh__.env["PATHEXT"] - if PYTHON_VERSION_INFO < (3, 5, 0): - for fname in os.listdir(path): - fpath = os.path.join(path, fname) - if os.path.exists(fpath) and not os.path.isdir(fpath): - base_name, ext = os.path.splitext(fname) - if ext.upper() in extensions: - yield fname - else: - for x in scandir(path): - try: - is_file = x.is_file() - except OSError: - continue - if is_file: - fname = x.name - else: - continue - base_name, ext = os.path.splitext(fname) - if ext.upper() in extensions: - yield fname - - -def executables_in(path): - """Returns a generator of files in path that the user could execute. """ - if ON_WINDOWS: - func = _executables_in_windows - else: - func = _executables_in_posix - try: - yield from func(path) - except PermissionError: - return - - def command_not_found(cmd): """Uses the debian/ubuntu command-not-found utility to suggest packages for a command that cannot currently be found. 
""" if not ON_LINUX: - return "" - elif not os.path.isfile("/usr/lib/command-not-found"): + return '' + elif not os.path.isfile('/usr/lib/command-not-found'): # utility is not on PATH - return "" - c = "/usr/lib/command-not-found {0}; exit 0" - s = subprocess.check_output( - c.format(cmd), universal_newlines=True, stderr=subprocess.STDOUT, shell=True - ) - s = "\n".join(s.rstrip().splitlines()).strip() + return '' + c = '/usr/lib/command-not-found {0}; exit 0' + s = subprocess.check_output(c.format(cmd), universal_newlines=True, + stderr=subprocess.STDOUT, shell=True) + s = '\n'.join(s.splitlines()[:-1]).strip() return s def suggest_commands(cmd, env, aliases): """Suggests alternative commands given an environment and aliases.""" - if not env.get("SUGGEST_COMMANDS"): - return "" - thresh = env.get("SUGGEST_THRESHOLD") - max_sugg = env.get("SUGGEST_MAX_NUM") + suggest_cmds = env.get('SUGGEST_COMMANDS') + if not suggest_cmds: + return + thresh = env.get('SUGGEST_THRESHOLD') + max_sugg = env.get('SUGGEST_MAX_NUM') if max_sugg < 0: - max_sugg = float("inf") + max_sugg = float('inf') + cmd = cmd.lower() suggested = {} - - for alias in builtins.aliases: - if alias not in suggested: - if levenshtein(alias.lower(), cmd, thresh) < thresh: - suggested[alias] = "Alias" - - for path in filter(os.path.isdir, env.get("PATH")): - for _file in executables_in(path): - if ( - _file not in suggested - and levenshtein(_file.lower(), cmd, thresh) < thresh - ): - suggested[_file] = "Command ({0})".format(os.path.join(path, _file)) - - suggested = collections.OrderedDict( - sorted( - suggested.items(), key=lambda x: suggestion_sort_helper(x[0].lower(), cmd) - ) - ) + for a in builtins.aliases: + if a not in suggested: + if levenshtein(a.lower(), cmd, thresh) < thresh: + suggested[a] = 'Alias' + + for d in filter(os.path.isdir, env.get('PATH')): + for f in os.listdir(d): + if f not in suggested: + if levenshtein(f.lower(), cmd, thresh) < thresh: + fname = os.path.join(d, f) + suggested[f] = 'Command ({0})'.format(fname) + suggested = OrderedDict( + sorted(suggested.items(), + key=lambda x: suggestion_sort_helper(x[0].lower(), cmd))) num = min(len(suggested), max_sugg) if num == 0: rtn = command_not_found(cmd) else: - oneof = "" if num == 1 else "one of " - tips = "Did you mean {}the following?".format(oneof) + oneof = '' if num == 1 else 'one of ' + tips = 'Did you mean {}the following?'.format(oneof) items = list(suggested.popitem(False) for _ in range(num)) length = max(len(key) for key, _ in items) + 2 - alternatives = "\n".join( - " {: <{}} {}".format(key + ":", length, val) for key, val in items - ) - rtn = "{}\n{}".format(tips, alternatives) + alternatives = '\n'.join(' {: <{}} {}'.format(key + ":", length, val) + for key, val in items) + rtn = '{}\n{}'.format(tips, alternatives) c = command_not_found(cmd) - rtn += ("\n\n" + c) if len(c) > 0 else "" + rtn += ('\n\n' + c) if len(c) > 0 else '' return rtn -def print_exception(msg=None): +def print_exception(): """Print exceptions with/without traceback.""" - env = getattr(builtins.__xonsh__, "env", None) - # flags indicating whether the traceback options have been manually set - if env is None: - env = os_environ - manually_set_trace = "XONSH_SHOW_TRACEBACK" in env - manually_set_logfile = "XONSH_TRACEBACK_LOGFILE" in env - else: - manually_set_trace = env.is_manually_set("XONSH_SHOW_TRACEBACK") - manually_set_logfile = env.is_manually_set("XONSH_TRACEBACK_LOGFILE") - if (not manually_set_trace) and (not manually_set_logfile): - # Notify about the traceback 
output possibility if neither of - # the two options have been manually set - sys.stderr.write( - "xonsh: For full traceback set: " "$XONSH_SHOW_TRACEBACK = True\n" - ) - # get env option for traceback and convert it if necessary - show_trace = env.get("XONSH_SHOW_TRACEBACK", False) - if not is_bool(show_trace): - show_trace = to_bool(show_trace) - # if the trace option has been set, print all traceback info to stderr - if show_trace: - # notify user about XONSH_TRACEBACK_LOGFILE if it has - # not been set manually - if not manually_set_logfile: - sys.stderr.write( - "xonsh: To log full traceback to a file set: " - "$XONSH_TRACEBACK_LOGFILE = \n" - ) + if 'XONSH_SHOW_TRACEBACK' not in builtins.__xonsh_env__: + sys.stderr.write('xonsh: For full traceback set: ' + '$XONSH_SHOW_TRACEBACK = True\n') + if builtins.__xonsh_env__.get('XONSH_SHOW_TRACEBACK'): traceback.print_exc() - # additionally, check if a file for traceback logging has been - # specified and convert to a proper option if needed - log_file = env.get("XONSH_TRACEBACK_LOGFILE", None) - log_file = to_logfile_opt(log_file) - if log_file: - # if log_file <> '' or log_file <> None, append - # traceback log there as well - with open(os.path.abspath(log_file), "a") as f: - traceback.print_exc(file=f) - - if not show_trace: - # if traceback output is disabled, print the exception's - # error message on stderr. - display_error_message() - if msg: - msg = msg if msg.endswith("\n") else msg + "\n" - sys.stderr.write(msg) - - -def display_error_message(strip_xonsh_error_types=True): - """ - Prints the error message of the current exception on stderr. - """ - exc_type, exc_value, exc_traceback = sys.exc_info() - exception_only = traceback.format_exception_only(exc_type, exc_value) - if exc_type is XonshError and strip_xonsh_error_types: - exception_only[0] = exception_only[0].partition(": ")[-1] - sys.stderr.write("".join(exception_only)) - - -def is_writable_file(filepath): - """ - Checks if a filepath is valid for writing. - """ - filepath = expand_path(filepath) - # convert to absolute path if needed - if not os.path.isabs(filepath): - filepath = os.path.abspath(filepath) - # cannot write to directories - if os.path.isdir(filepath): - return False - # if the file exists and is writable, we're fine - if os.path.exists(filepath): - return True if os.access(filepath, os.W_OK) else False - # if the path doesn't exist, isolate its directory component - # and ensure that directory is writable instead - return os.access(os.path.dirname(filepath), os.W_OK) + else: + exc_type, exc_value, exc_traceback = sys.exc_info() + exception_only = traceback.format_exception_only(exc_type, exc_value) + sys.stderr.write(''.join(exception_only)) # Modified from Public Domain code, by Magnus Lie Hetland # from http://hetland.org/coding/python/levenshtein.py -def levenshtein(a, b, max_dist=float("inf")): +def levenshtein(a, b, max_dist=float('inf')): """Calculates the Levenshtein distance between a and b.""" n, m = len(a), len(b) if abs(n - m) > max_dist: - return float("inf") + return float('inf') if n > m: # Make sure n <= m, to use O(min(n,m)) space a, b = b, a @@ -1003,60 +431,24 @@ def suggestion_sort_helper(x, y): return lendiff + inx + iny -def escape_windows_cmd_string(s): - """Returns a string that is usable by the Windows cmd.exe. - The escaping is based on details here and empirical testing: +def escape_windows_title_string(s): + """Returns a string that is usable by the Windows cmd.exe title + builtin. 
The escaping is based on details here and empirical testing:
     http://www.robvanderwoude.com/escapechars.php
     """
-    for c in '^()%!<>&|"':
-        s = s.replace(c, "^" + c)
+    for c in '^&<>|':
+        s = s.replace(c, '^' + c)
+    s = s.replace('/?', '/.')
     return s


-def argvquote(arg, force=False):
-    """ Returns an argument quoted in such a way that CommandLineToArgvW
-    on Windows will return the argument string unchanged.
-    This is the same thing Popen does when supplied with a list of arguments.
-    Arguments in a command line should be separated by spaces; this
-    function does not add these spaces. This implementation follows the
-    suggestions outlined here:
-    https://blogs.msdn.microsoft.com/twistylittlepassagesallalike/2011/04/23/everyone-quotes-command-line-arguments-the-wrong-way/
-    """
-    if not force and len(arg) != 0 and not any([c in arg for c in ' \t\n\v"']):
-        return arg
-    else:
-        n_backslashes = 0
-        cmdline = '"'
-        for c in arg:
-            if c == "\\":
-                # first count the number of current backslashes
-                n_backslashes += 1
-                continue
-            if c == '"':
-                # Escape all backslashes and the following double quotation mark
-                cmdline += (n_backslashes * 2 + 1) * "\\"
-            else:
-                # backslashes are not special here
-                cmdline += n_backslashes * "\\"
-                n_backslashes = 0
-            cmdline += c
-        # Escape all backslashes, but let the terminating
-        # double quotation mark we add below be interpreted
-        # as a metacharacter
-        cmdline += n_backslashes * 2 * "\\" + '"'
-        return cmdline
-
-
 def on_main_thread():
     """Checks if we are on the main thread or not."""
     return threading.current_thread() is threading.main_thread()


-_DEFAULT_SENTINEL = object()
-
-
-@contextlib.contextmanager
-def swap(namespace, name, value, default=_DEFAULT_SENTINEL):
+@contextmanager
+def swap(namespace, name, value, default=NotImplemented):
     """Swaps a current variable name in a namespace for another value, and
     then replaces it when the context is exited.
     """
@@ -1068,32 +460,11 @@ def swap(namespace, name, value, default=_DEFAULT_SENTINEL):
     else:
         setattr(namespace, name, old)

-
-@contextlib.contextmanager
-def swap_values(d, updates, default=_DEFAULT_SENTINEL):
-    """Updates a dictionary (or other mapping) with values from another mapping,
-    and then restores the original mapping when the context is exited.
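A minimal sketch of the two context managers here: swap temporarily rebinds an attribute, and the removed swap_values does the same for mapping keys:

    class NS:
        pass

    ns = NS()
    ns.value = 1
    with swap(ns, "value", 2):
        assert ns.value == 2  # temporarily rebound
    assert ns.value == 1      # restored on exit

    d = {"a": 1}
    with swap_values(d, {"a": 10, "b": 20}):
        assert d == {"a": 10, "b": 20}
    assert d == {"a": 1}      # "b" deleted again, "a" restored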
- """ - old = {k: d.get(k, default) for k in updates} - d.update(updates) - yield - for k, v in old.items(): - if v is default and k in d: - del d[k] - else: - d[k] = v - - # -# Validators and converters +# Validators and contervers # -def detype(x): - """This assumes that the object has a detype method, and calls that.""" - return x.detype() - - def is_int(x): """Tests if something is an integer""" return isinstance(x, int) @@ -1106,27 +477,7 @@ def is_float(x): def is_string(x): """Tests if something is a string""" - return isinstance(x, str) - - -def is_slice(x): - """Tests if something is a slice""" - return isinstance(x, slice) - - -def is_callable(x): - """Tests if something is callable""" - return callable(x) - - -def is_string_or_callable(x): - """Tests if something is a string or callable""" - return is_string(x) or is_callable(x) - - -def is_class(x): - """Tests if something is a class""" - return isinstance(x, type) + return isinstance(x, string_types) def always_true(x): @@ -1139,33 +490,32 @@ def always_false(x): return False -def always_none(x): - """Returns None""" - return None - - def ensure_string(x): """Returns a string if x is not a string, and x if it already is.""" - return str(x) + if isinstance(x, string_types): + return x + else: + return str(x) def is_env_path(x): """This tests if something is an environment path, ie a list of strings.""" - return isinstance(x, EnvPath) + if isinstance(x, string_types): + return False + else: + return (isinstance(x, Sequence) and + all([isinstance(a, string_types) for a in x])) def str_to_env_path(x): """Converts a string to an environment path, ie a list of strings, splitting on the OS separator. """ - # splitting will be done implicitly in EnvPath's __init__ - return EnvPath(x) + return x.split(os.pathsep) def env_path_to_str(x): - """Converts an environment path to a string by joining on the OS - separator. - """ + """Converts an environment path to a string by joining on the OS separator.""" return os.pathsep.join(x) @@ -1174,392 +524,35 @@ def is_bool(x): return isinstance(x, bool) -def is_logfile_opt(x): - """ - Checks if x is a valid $XONSH_TRACEBACK_LOGFILE option. Returns False - if x is not a writable/creatable file or an empty string or None. - """ - if x is None: - return True - if not isinstance(x, str): - return False - else: - return is_writable_file(x) or x == "" - - -def to_logfile_opt(x): - """ - Converts a $XONSH_TRACEBACK_LOGFILE option to either a str containing - the filepath if it is a writable file or None if the filepath is not - valid, informing the user on stderr about the invalid choice. - """ - superclass = pathlib.PurePath - if PYTHON_VERSION_INFO >= (3, 6, 0): - superclass = os.PathLike - if isinstance(x, superclass): - x = str(x) - if is_logfile_opt(x): - return x - else: - # if option is not valid, return a proper - # option and inform the user on stderr - sys.stderr.write( - "xonsh: $XONSH_TRACEBACK_LOGFILE must be a " - "filepath pointing to a file that either exists " - "and is writable or that can be created.\n" - ) - return None - - -def logfile_opt_to_str(x): - """ - Detypes a $XONSH_TRACEBACK_LOGFILE option. - """ - if x is None: - # None should not be detyped to 'None', as 'None' constitutes - # a perfectly valid filename and retyping it would introduce - # ambiguity. Detype to the empty string instead. 
- return "" - return str(x) - - -_FALSES = LazyObject( - lambda: frozenset(["", "0", "n", "f", "no", "none", "false", "off"]), - globals(), - "_FALSES", -) - +_FALSES = frozenset(['', '0', 'n', 'f', 'no', 'none', 'false']) def to_bool(x): """"Converts to a boolean in a semantically meaningful way.""" if isinstance(x, bool): return x - elif isinstance(x, str): + elif isinstance(x, string_types): return False if x.lower() in _FALSES else True else: return bool(x) -def to_itself(x): - """No conversion, returns itself.""" - return x - - def bool_to_str(x): - """Converts a bool to an empty string if False and the string '1' if - True. - """ - return "1" if x else "" - - -_BREAKS = LazyObject( - lambda: frozenset(["b", "break", "s", "skip", "q", "quit"]), globals(), "_BREAKS" -) - - -def to_bool_or_break(x): - if isinstance(x, str) and x.lower() in _BREAKS: - return "break" - else: - return to_bool(x) - - -def is_bool_or_int(x): - """Returns whether a value is a boolean or integer.""" - return is_bool(x) or is_int(x) - - -def to_bool_or_int(x): - """Converts a value to a boolean or an integer.""" - if isinstance(x, str): - return int(x) if x.isdigit() else to_bool(x) - elif is_int(x): # bools are ints too! - return x - else: - return bool(x) - - -def bool_or_int_to_str(x): - """Converts a boolean or integer to a string.""" - return bool_to_str(x) if is_bool(x) else str(x) - - -@lazyobject -def SLICE_REG(): - return re.compile( - r"(?P(?:-\d)?\d*):(?P(?:-\d)?\d*):?(?P(?:-\d)?\d*)" - ) + """Converts a bool to an empty string if False and the string '1' if True.""" + return '1' if x else '' -def ensure_slice(x): - """Try to convert an object into a slice, complain on failure""" - if not x and x != 0: +def ensure_int_or_slice(x): + """Makes sure that x is list-indexable.""" + if x is None: return slice(None) - elif is_slice(x): + elif is_int(x): return x - try: - x = int(x) - if x != -1: - s = slice(x, x + 1) - else: - s = slice(-1, None, None) - except ValueError: - x = x.strip("[]()") - m = SLICE_REG.fullmatch(x) - if m: - groups = (int(i) if i else None for i in m.groups()) - s = slice(*groups) - else: - raise ValueError("cannot convert {!r} to slice".format(x)) - except TypeError: - try: - s = slice(*(int(i) for i in x)) - except (TypeError, ValueError): - raise ValueError("cannot convert {!r} to slice".format(x)) - return s - - -def get_portions(it, slices): - """Yield from portions of an iterable. - - Parameters - ---------- - it: iterable - slices: a slice or a list of slice objects - """ - if is_slice(slices): - slices = [slices] - if len(slices) == 1: - s = slices[0] - try: - yield from itertools.islice(it, s.start, s.stop, s.step) - return - except ValueError: # islice failed - pass - it = list(it) - for s in slices: - yield from it[s] - - -def is_slice_as_str(x): - """ - Test if string x is a slice. If not a string return False. - """ - try: - x = x.strip("[]()") - m = SLICE_REG.fullmatch(x) - if m: - return True - except AttributeError: - pass - return False - - -def is_int_as_str(x): - """ - Test if string x is an integer. If not a string return False. 
- """ - try: - return x.isdecimal() - except AttributeError: - return False - - -def is_string_set(x): - """Tests if something is a set of strings""" - return isinstance(x, cabc.Set) and all(isinstance(a, str) for a in x) - - -def csv_to_set(x): - """Convert a comma-separated list of strings to a set of strings.""" - if not x: - return set() - else: - return set(x.split(",")) - - -def set_to_csv(x): - """Convert a set of strings to a comma-separated list of strings.""" - return ",".join(x) - - -def pathsep_to_set(x): - """Converts a os.pathsep separated string to a set of strings.""" - if not x: - return set() - else: - return set(x.split(os.pathsep)) - - -def set_to_pathsep(x, sort=False): - """Converts a set to an os.pathsep separated string. The sort kwarg - specifies whether to sort the set prior to str conversion. - """ - if sort: - x = sorted(x) - return os.pathsep.join(x) - - -def is_string_seq(x): - """Tests if something is a sequence of strings""" - return isinstance(x, cabc.Sequence) and all(isinstance(a, str) for a in x) - - -def is_nonstring_seq_of_strings(x): - """Tests if something is a sequence of strings, where the top-level - sequence is not a string itself. - """ - return ( - isinstance(x, cabc.Sequence) - and not isinstance(x, str) - and all(isinstance(a, str) for a in x) - ) - - -def pathsep_to_seq(x): - """Converts a os.pathsep separated string to a sequence of strings.""" - if not x: - return [] - else: - return x.split(os.pathsep) - - -def seq_to_pathsep(x): - """Converts a sequence to an os.pathsep separated string.""" - return os.pathsep.join(x) - - -def pathsep_to_upper_seq(x): - """Converts a os.pathsep separated string to a sequence of - uppercase strings. - """ - if not x: - return [] - else: - return x.upper().split(os.pathsep) - - -def seq_to_upper_pathsep(x): - """Converts a sequence to an uppercase os.pathsep separated string.""" - return os.pathsep.join(x).upper() - - -def is_bool_seq(x): - """Tests if an object is a sequence of bools.""" - return isinstance(x, cabc.Sequence) and all(isinstance(y, bool) for y in x) - - -def csv_to_bool_seq(x): - """Takes a comma-separated string and converts it into a list of bools.""" - return [to_bool(y) for y in csv_to_set(x)] - - -def bool_seq_to_csv(x): - """Converts a sequence of bools to a comma-separated string.""" - return ",".join(map(str, x)) - - -def ptk2_color_depth_setter(x): - """ Setter function for $PROMPT_TOOLKIT_COLOR_DEPTH. Also - updates os.environ so prompt toolkit can pickup the value. - """ - x = str(x) - if x in { - "DEPTH_1_BIT", - "MONOCHROME", - "DEPTH_4_BIT", - "ANSI_COLORS_ONLY", - "DEPTH_8_BIT", - "DEFAULT", - "DEPTH_24_BIT", - "TRUE_COLOR", - }: - pass - elif x in {"", None}: - x = "" + # must have a string from here on + if ':' in x: + x = x.strip('[]()') + return slice(*(int(x) if len(x) > 0 else None for x in x.split(':'))) else: - msg = '"{}" is not a valid value for $PROMPT_TOOLKIT_COLOR_DEPTH. '.format(x) - warnings.warn(msg, RuntimeWarning) - x = "" - if x == "" and "PROMPT_TOOLKIT_COLOR_DEPTH" in os_environ: - del os_environ["PROMPT_TOOLKIT_COLOR_DEPTH"] - else: - os_environ["PROMPT_TOOLKIT_COLOR_DEPTH"] = x - return x - - -def is_completions_display_value(x): - return x in {"none", "single", "multi"} - - -def to_completions_display_value(x): - x = str(x).lower() - if x in {"none", "false"}: - x = "none" - elif x in {"multi", "true"}: - x = "multi" - elif x in {"single", "readline"}: - pass - else: - msg = '"{}" is not a valid value for $COMPLETIONS_DISPLAY. 
'.format(x) - msg += 'Using "multi".' - warnings.warn(msg, RuntimeWarning) - x = "multi" - return x - - -def is_str_str_dict(x): - """Tests if something is a str:str dictionary""" - return isinstance(x, dict) and all( - isinstance(k, str) and isinstance(v, str) for k, v in x.items() - ) - - -def to_dict(x): - """Converts a string to a dictionary""" - if isinstance(x, dict): - return x - try: - x = ast.literal_eval(x) - except (ValueError, SyntaxError): - msg = '"{}" can not be converted to Python dictionary.'.format(x) - warnings.warn(msg, RuntimeWarning) - x = dict() - return x - - -def to_str_str_dict(x): - """Converts a string to str:str dictionary""" - if is_str_str_dict(x): - return x - x = to_dict(x) - if not is_str_str_dict(x): - msg = '"{}" can not be converted to str:str dictionary.'.format(x) - warnings.warn(msg, RuntimeWarning) - x = dict() - return x - - -def dict_to_str(x): - """Converts a dictionary to a string""" - if not x or len(x) == 0: - return "" - return str(x) - - -def setup_win_unicode_console(enable): - """"Enables or disables unicode display on windows.""" - try: - import win_unicode_console - except ImportError: - win_unicode_console = False - enable = to_bool(enable) - if ON_WINDOWS and win_unicode_console: - if enable: - win_unicode_console.enable() - else: - win_unicode_console.disable() - return enable + return int(x) # history validation @@ -1574,135 +567,80 @@ def setup_win_unicode_console(enable): _gb_to_b = lambda x: 1024 * _mb_to_b(x) _tb_to_b = lambda x: 1024 * _tb_to_b(x) -CANON_HISTORY_UNITS = LazyObject( - lambda: frozenset(["commands", "files", "s", "b"]), globals(), "CANON_HISTORY_UNITS" -) - -HISTORY_UNITS = LazyObject( - lambda: { - "": ("commands", int), - "c": ("commands", int), - "cmd": ("commands", int), - "cmds": ("commands", int), - "command": ("commands", int), - "commands": ("commands", int), - "f": ("files", int), - "files": ("files", int), - "s": ("s", float), - "sec": ("s", float), - "second": ("s", float), - "seconds": ("s", float), - "m": ("s", _min_to_sec), - "min": ("s", _min_to_sec), - "mins": ("s", _min_to_sec), - "h": ("s", _hour_to_sec), - "hr": ("s", _hour_to_sec), - "hour": ("s", _hour_to_sec), - "hours": ("s", _hour_to_sec), - "d": ("s", _day_to_sec), - "day": ("s", _day_to_sec), - "days": ("s", _day_to_sec), - "mon": ("s", _month_to_sec), - "month": ("s", _month_to_sec), - "months": ("s", _month_to_sec), - "y": ("s", _year_to_sec), - "yr": ("s", _year_to_sec), - "yrs": ("s", _year_to_sec), - "year": ("s", _year_to_sec), - "years": ("s", _year_to_sec), - "b": ("b", int), - "byte": ("b", int), - "bytes": ("b", int), - "kb": ("b", _kb_to_b), - "kilobyte": ("b", _kb_to_b), - "kilobytes": ("b", _kb_to_b), - "mb": ("b", _mb_to_b), - "meg": ("b", _mb_to_b), - "megs": ("b", _mb_to_b), - "megabyte": ("b", _mb_to_b), - "megabytes": ("b", _mb_to_b), - "gb": ("b", _gb_to_b), - "gig": ("b", _gb_to_b), - "gigs": ("b", _gb_to_b), - "gigabyte": ("b", _gb_to_b), - "gigabytes": ("b", _gb_to_b), - "tb": ("b", _tb_to_b), - "terabyte": ("b", _tb_to_b), - "terabytes": ("b", _tb_to_b), - }, - globals(), - "HISTORY_UNITS", -) +CANON_HISTORY_UNITS = frozenset(['commands', 'files', 's', 'b']) + +HISTORY_UNITS = { + '': ('commands', int), + 'c': ('commands', int), + 'cmd': ('commands', int), + 'cmds': ('commands', int), + 'command': ('commands', int), + 'commands': ('commands', int), + 'f': ('files', int), + 'files': ('files', int), + 's': ('s', float), + 'sec': ('s', float), + 'second': ('s', float), + 'seconds': ('s', float), + 'm': ('s', 
_min_to_sec), + 'min': ('s', _min_to_sec), + 'mins': ('s', _min_to_sec), + 'h': ('s', _hour_to_sec), + 'hr': ('s', _hour_to_sec), + 'hour': ('s', _hour_to_sec), + 'hours': ('s', _hour_to_sec), + 'd': ('s', _day_to_sec), + 'day': ('s', _day_to_sec), + 'days': ('s', _day_to_sec), + 'mon': ('s', _month_to_sec), + 'month': ('s', _month_to_sec), + 'months': ('s', _month_to_sec), + 'y': ('s', _year_to_sec), + 'yr': ('s', _year_to_sec), + 'yrs': ('s', _year_to_sec), + 'year': ('s', _year_to_sec), + 'years': ('s', _year_to_sec), + 'b': ('b', int), + 'byte': ('b', int), + 'bytes': ('b', int), + 'kb': ('b', _kb_to_b), + 'kilobyte': ('b', _kb_to_b), + 'kilobytes': ('b', _kb_to_b), + 'mb': ('b', _mb_to_b), + 'meg': ('b', _mb_to_b), + 'megs': ('b', _mb_to_b), + 'megabyte': ('b', _mb_to_b), + 'megabytes': ('b', _mb_to_b), + 'gb': ('b', _gb_to_b), + 'gig': ('b', _gb_to_b), + 'gigs': ('b', _gb_to_b), + 'gigabyte': ('b', _gb_to_b), + 'gigabytes': ('b', _gb_to_b), + 'tb': ('b', _tb_to_b), + 'terabyte': ('b', _tb_to_b), + 'terabytes': ('b', _tb_to_b), + } """Maps lowercase unit names to canonical name and conversion utilities.""" - def is_history_tuple(x): """Tests if something is a proper history value, units tuple.""" - if ( - isinstance(x, cabc.Sequence) - and len(x) == 2 - and isinstance(x[0], (int, float)) - and x[1].lower() in CANON_HISTORY_UNITS - ): - return True + if isinstance(x, Sequence) and len(x) == 2 and isinstance(x[0], (int, float)) \ + and x[1].lower() in CANON_HISTORY_UNITS: + return True return False -def is_history_backend(x): - """Tests if something is a valid history backend.""" - return is_string(x) or is_class(x) or isinstance(x, object) - - -def is_dynamic_cwd_width(x): - """ Determine if the input is a valid input for the DYNAMIC_CWD_WIDTH - environment variable. 
- """ - return ( - isinstance(x, tuple) - and len(x) == 2 - and isinstance(x[0], float) - and x[1] in set("c%") - ) - - -def to_dynamic_cwd_tuple(x): - """Convert to a canonical cwd_width tuple.""" - unit = "c" - if isinstance(x, str): - if x[-1] == "%": - x = x[:-1] - unit = "%" - else: - unit = "c" - return (float(x), unit) - else: - return (float(x[0]), x[1]) - - -def dynamic_cwd_tuple_to_str(x): - """Convert a canonical cwd_width tuple to a string.""" - if x[1] == "%": - return str(x[0]) + "%" - else: - return str(x[0]) - - -RE_HISTORY_TUPLE = LazyObject( - lambda: re.compile(r"([-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?)\s*([A-Za-z]*)"), - globals(), - "RE_HISTORY_TUPLE", -) - +RE_HISTORY_TUPLE = re.compile('([-+]?[0-9]*\.?[0-9]+([eE][-+]?[0-9]+)?)\s*([A-Za-z]*)') def to_history_tuple(x): - """Converts to a canonical history tuple.""" - if not isinstance(x, (cabc.Sequence, float, int)): - raise ValueError("history size must be given as a sequence or number") + """Converts to a canonincal history tuple.""" + if not isinstance(x, (Sequence, float, int)): + raise ValueError('history size must be given as a sequence or number') if isinstance(x, str): - m = RE_HISTORY_TUPLE.match(x.strip().lower()) + m = RE_HISTORY_TUPLE.match(x.strip()) return to_history_tuple((m.group(1), m.group(3))) elif isinstance(x, (float, int)): - return to_history_tuple((x, "commands")) + return to_history_tuple((x, 'commands')) units, converter = HISTORY_UNITS[x[1]] value = converter(x[0]) return (value, units) @@ -1710,739 +648,92 @@ def to_history_tuple(x): def history_tuple_to_str(x): """Converts a valid history tuple to a canonical string.""" - return "{0} {1}".format(*x) - - -def format_color(string, **kwargs): - """Formats strings that may contain colors. This simply dispatches to the - shell instances method of the same name. The results of this function should - be directly usable by print_color(). - """ - return builtins.__xonsh__.shell.shell.format_color(string, **kwargs) - - -def print_color(string, **kwargs): - """Prints a string that may contain colors. This dispatched to the shell - method of the same name. Colors will be formatted if they have not already - been. 
- """ - builtins.__xonsh__.shell.shell.print_color(string, **kwargs) - - -def color_style_names(): - """Returns an iterable of all available style names.""" - return builtins.__xonsh__.shell.shell.color_style_names() - - -def color_style(): - """Returns the current color map.""" - return builtins.__xonsh__.shell.shell.color_style() - - -def _token_attr_from_stylemap(stylemap): - """yields tokens attr, and index from a stylemap """ - import prompt_toolkit as ptk - - if builtins.__xonsh__.shell.shell_type == "prompt_toolkit1": - style = ptk.styles.style_from_dict(stylemap) - for token in stylemap: - yield token, style.token_to_attrs[token] - else: - style = ptk.styles.style_from_pygments_dict(stylemap) - for token in stylemap: - style_str = "class:{}".format( - ptk.styles.pygments.pygments_token_to_classname(token) - ) - yield (token, style.get_attrs_for_style_str(style_str)) - - -def _get_color_lookup_table(): - """Returns the prompt_toolkit win32 ColorLookupTable """ - if builtins.__xonsh__.shell.shell_type == "prompt_toolkit1": - from prompt_toolkit.terminal.win32_output import ColorLookupTable - else: - from prompt_toolkit.output.win32 import ColorLookupTable - return ColorLookupTable() - - -def _get_color_indexes(style_map): - """Generates the color and windows color index for a style """ - table = _get_color_lookup_table() - for token, attr in _token_attr_from_stylemap(style_map): - if attr.color: - index = table.lookup_fg_color(attr.color) - try: - rgb = ( - int(attr.color[0:2], 16), - int(attr.color[2:4], 16), - int(attr.color[4:6], 16), - ) - except Exception: - rgb = None - yield token, index, rgb - - -# Map of new PTK2 color names to PTK1 variants -PTK_NEW_OLD_COLOR_MAP = LazyObject( - lambda: { - "black": "black", - "red": "darkred", - "green": "darkgreen", - "yellow": "brown", - "blue": "darkblue", - "magenta": "purple", - "cyan": "teal", - "gray": "lightgray", - "brightblack": "darkgray", - "brightred": "red", - "brightgreen": "green", - "brightyellow": "yellow", - "brightblue": "blue", - "brightmagenta": "fuchsia", - "brightcyan": "turquoise", - "white": "white", - }, - globals(), - "PTK_NEW_OLD_COLOR_MAP", -) - -# Map of new ansicolor names to old PTK1 names -ANSICOLOR_NAMES_MAP = LazyObject( - lambda: {"ansi" + k: "#ansi" + v for k, v in PTK_NEW_OLD_COLOR_MAP.items()}, - globals(), - "ANSICOLOR_NAMES_MAP", -) - - -def _win10_color_map(): - cmap = { - "ansiblack": (12, 12, 12), - "ansiblue": (0, 55, 218), - "ansigreen": (19, 161, 14), - "ansicyan": (58, 150, 221), - "ansired": (197, 15, 31), - "ansimagenta": (136, 23, 152), - "ansiyellow": (193, 156, 0), - "ansigray": (204, 204, 204), - "ansibrightblack": (118, 118, 118), - "ansibrightblue": (59, 120, 255), - "ansibrightgreen": (22, 198, 12), - "ansibrightcyan": (97, 214, 214), - "ansibrightred": (231, 72, 86), - "ansibrightmagenta": (180, 0, 158), - "ansibrightyellow": (249, 241, 165), - "ansiwhite": (242, 242, 242), - } - return { - k: "#{0:02x}{1:02x}{2:02x}".format(r, g, b) for k, (r, g, b) in cmap.items() - } - - -WIN10_COLOR_MAP = LazyObject(_win10_color_map, globals(), "WIN10_COLOR_MAP") - - -def _win_bold_color_map(): - """ Map dark ansi colors to lighter version. 
""" - return { - "ansiblack": "ansibrightblack", - "ansiblue": "ansibrightblue", - "ansigreen": "ansibrightgreen", - "ansicyan": "ansibrightcyan", - "ansired": "ansibrightred", - "ansimagenta": "ansibrightmagenta", - "ansiyellow": "ansibrightyellow", - "ansigray": "ansiwhite", - } - - -WIN_BOLD_COLOR_MAP = LazyObject(_win_bold_color_map, globals(), "WIN_BOLD_COLOR_MAP") - - -def hardcode_colors_for_win10(style_map): - """Replace all ansi colors with hardcoded colors to avoid unreadable defaults - in conhost.exe - """ - modified_style = {} - if not builtins.__xonsh__.env["PROMPT_TOOLKIT_COLOR_DEPTH"]: - builtins.__xonsh__.env["PROMPT_TOOLKIT_COLOR_DEPTH"] = "DEPTH_24_BIT" - # Replace all ansi colors with hardcoded colors to avoid unreadable defaults - # in conhost.exe - for token, style_str in style_map.items(): - for ansicolor in WIN10_COLOR_MAP: - if ansicolor in style_str: - if "bold" in style_str and "nobold" not in style_str: - # Win10 doesn't yet handle bold colors. Instead dark - # colors are mapped to their lighter version. We simulate - # the same here. - style_str.replace("bold", "") - hexcolor = WIN10_COLOR_MAP[ - WIN_BOLD_COLOR_MAP.get(ansicolor, ansicolor) - ] - else: - hexcolor = WIN10_COLOR_MAP[ansicolor] - style_str = style_str.replace(ansicolor, hexcolor) - modified_style[token] = style_str - return modified_style - - -def ansicolors_to_ptk1_names(stylemap): - """Converts ansicolor names in a stylemap to old PTK1 color names - """ - modified_stylemap = {} - for token, style_str in stylemap.items(): - for color, ptk1_color in ANSICOLOR_NAMES_MAP.items(): - if "#" + color not in style_str: - style_str = style_str.replace(color, ptk1_color) - modified_stylemap[token] = style_str - return modified_stylemap - - -def intensify_colors_for_cmd_exe(style_map): - """Returns a modified style to where colors that maps to dark - colors are replaced with brighter versions. - """ - modified_style = {} - replace_colors = { - 1: "ansibrightcyan", # subst blue with bright cyan - 2: "ansibrightgreen", # subst green with bright green - 4: "ansibrightred", # subst red with bright red - 5: "ansibrightmagenta", # subst magenta with bright magenta - 6: "ansibrightyellow", # subst yellow with bright yellow - 9: "ansicyan", # subst intense blue with dark cyan (more readable) - } - if builtins.__xonsh__.shell.shell_type == "prompt_toolkit1": - replace_colors = ansicolors_to_ptk1_names(replace_colors) - for token, idx, _ in _get_color_indexes(style_map): - if idx in replace_colors: - modified_style[token] = replace_colors[idx] - return modified_style - - -def intensify_colors_on_win_setter(enable): - """Resets the style when setting the INTENSIFY_COLORS_ON_WIN - environment variable. - """ - enable = to_bool(enable) - if builtins.__xonsh__.shell is not None: - if hasattr(builtins.__xonsh__.shell.shell.styler, "style_name"): - delattr(builtins.__xonsh__.shell.shell.styler, "style_name") - return enable - - -def format_std_prepost(template, env=None): - """Formats a template prefix/postfix string for a standard buffer. - Returns a string suitable for prepending or appending. - """ - if not template: - return "" - env = builtins.__xonsh__.env if env is None else env - shell = builtins.__xonsh__.shell.shell - try: - s = shell.prompt_formatter(template) - except Exception: - print_exception() - # \001\002 is there to fool pygments into not returning an empty string - # for potentially empty input. This happens when the template is just a - # color code with no visible text. 
-    invis = "\001\002"
-    s = shell.format_color(invis + s + invis, force_string=True)
-    s = s.replace(invis, "")
-    return s
-
-
-_RE_STRING_START = "[bBprRuUf]*"
-_RE_STRING_TRIPLE_DOUBLE = '"""'
-_RE_STRING_TRIPLE_SINGLE = "'''"
-_RE_STRING_DOUBLE = '"'
-_RE_STRING_SINGLE = "'"
-_STRINGS = (
-    _RE_STRING_TRIPLE_DOUBLE,
-    _RE_STRING_TRIPLE_SINGLE,
-    _RE_STRING_DOUBLE,
-    _RE_STRING_SINGLE,
-)
-RE_BEGIN_STRING = LazyObject(
-    lambda: re.compile("(" + _RE_STRING_START + "(" + "|".join(_STRINGS) + "))"),
-    globals(),
-    "RE_BEGIN_STRING",
-)
-"""Regular expression matching the start of a string, including quotes and
-leading characters (r, b, or u)"""
-
-RE_STRING_START = LazyObject(
-    lambda: re.compile(_RE_STRING_START), globals(), "RE_STRING_START"
-)
-"""Regular expression matching the characters before the quotes when starting a
-string (r, b, or u, case insensitive)"""
-
-RE_STRING_CONT = LazyDict(
-    {
-        '"': lambda: re.compile(r'((\\(.|\n))|([^"\\]))*'),
-        "'": lambda: re.compile(r"((\\(.|\n))|([^'\\]))*"),
-        '"""': lambda: re.compile(r'((\\(.|\n))|([^"\\])|("(?!""))|\n)*'),
-        "'''": lambda: re.compile(r"((\\(.|\n))|([^'\\])|('(?!''))|\n)*"),
-    },
-    globals(),
-    "RE_STRING_CONT",
-)
-"""Dictionary mapping starting quote sequences to regular expressions that
-match the contents of a string beginning with those quotes (not including the
-terminating quotes)"""
-
-
-@lazyobject
-def RE_COMPLETE_STRING():
-    ptrn = (
-        "^"
-        + _RE_STRING_START
-        + "(?P<quote>"
-        + "|".join(_STRINGS)
-        + ")"
-        + ".*?(?P=quote)$"
-    )
-    return re.compile(ptrn, re.DOTALL)
-
-
-def strip_simple_quotes(s):
-    """Strips matching single, double, triple-single, or triple-double quotes
-    from the front and back of a string, if present. Otherwise, returns the
-    string unchanged.
-    """
-    starts_single = s.startswith("'")
-    starts_double = s.startswith('"')
-    if not starts_single and not starts_double:
-        return s
-    elif starts_single:
-        ends_single = s.endswith("'")
-        if not ends_single:
-            return s
-        elif s.startswith("'''") and s.endswith("'''") and len(s) >= 6:
-            return s[3:-3]
-        elif len(s) >= 2:
-            return s[1:-1]
-        else:
-            return s
-    else:
-        # starts double
-        ends_double = s.endswith('"')
-        if not ends_double:
-            return s
-        elif s.startswith('"""') and s.endswith('"""') and len(s) >= 6:
-            return s[3:-3]
-        elif len(s) >= 2:
-            return s[1:-1]
-        else:
-            return s
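# Editorial examples of strip_simple_quotes() above (values per the code):
#   strip_simple_quotes("'hi'")        -> "hi"
#   strip_simple_quotes('"""doc"""')   -> "doc"
#   strip_simple_quotes("'unmatched")  -> "'unmatched"   (returned unchanged)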
- """ - string_indices = [] - starting_quote = [] - current_index = 0 - match = re.search(RE_BEGIN_STRING, x) - while match is not None: - # add the start in - start = match.start() - quote = match.group(0) - lenquote = len(quote) - current_index += start - # store the starting index of the string, as well as the - # characters in the starting quotes (e.g., ", ', """, r", etc) - string_indices.append(current_index) - starting_quote.append(quote) - # determine the string that should terminate this string - ender = re.sub(RE_STRING_START, "", quote) - x = x[start + lenquote :] - current_index += lenquote - # figure out what is inside the string - continuer = RE_STRING_CONT[ender] - contents = re.match(continuer, x) - inside = contents.group(0) - leninside = len(inside) - current_index += contents.start() + leninside + len(ender) - # if we are not at the end of the input string, add the ending index of - # the string to string_indices - if contents.end() < len(x): - string_indices.append(current_index) - x = x[leninside + len(ender) :] - # find the next match - match = re.search(RE_BEGIN_STRING, x) - numquotes = len(string_indices) - if numquotes == 0: - return (None, None, None) - elif numquotes % 2: - return (string_indices[-1], None, starting_quote[-1]) - else: - return (string_indices[-2], string_indices[-1], starting_quote[-1]) - - -# regular expressions for matching environment variables -# i.e $FOO, ${'FOO'} -@lazyobject -def POSIX_ENVVAR_REGEX(): - pat = r"""\$({(?P['"])|)(?P\w+)((?P=quote)}|(?:\1\b))""" - return re.compile(pat) - - -def expandvars(path): - """Expand shell variables of the forms $var, ${var} and %var%. - Unknown variables are left unchanged.""" - env = builtins.__xonsh__.env - if isinstance(path, bytes): - path = path.decode( - encoding=env.get("XONSH_ENCODING"), errors=env.get("XONSH_ENCODING_ERRORS") - ) - elif isinstance(path, pathlib.Path): - # get the path's string representation - path = str(path) - if "$" in path: - for match in POSIX_ENVVAR_REGEX.finditer(path): - name = match.group("envvar") - if name in env: - ensurer = env.get_ensurer(name) - val = env[name] - value = str(val) if ensurer.detype is None else ensurer.detype(val) - value = str(val) if value is None else value - path = POSIX_ENVVAR_REGEX.sub(value, path, count=1) - return path - + return '{0} {1}'.format(*x) # -# File handling tools +# prompt toolkit tools # -def backup_file(fname): - """Moves an existing file to a new name that has the current time right - before the extension. - """ - # lazy imports - import shutil - from datetime import datetime - - base, ext = os.path.splitext(fname) - timestamp = datetime.now().strftime("%Y-%m-%d-%H-%M-%S-%f") - newfname = "%s.%s%s" % (base, timestamp, ext) - shutil.move(fname, newfname) - - -def normabspath(p): - """Returns as normalized absolute path, namely, normcase(abspath(p))""" - return os.path.normcase(os.path.abspath(p)) - +class FakeChar(str): + """Class that holds a single char and escape sequences that surround it. -def expanduser_abs_path(inp): - """ Provides user expanded absolute path """ - return os.path.abspath(expanduser(inp)) + It is used as a workaround for the fact that prompt_toolkit doesn't display + colorful prompts correctly. 
+ It behaves like normal string created with prefix + char + suffix, but has + two differences: + * len() always returns 2 -WINDOWS_DRIVE_MATCHER = LazyObject( - lambda: re.compile(r"^\w:"), globals(), "WINDOWS_DRIVE_MATCHER" -) - - -def expand_case_matching(s): - """Expands a string to a case insensitive globable string.""" - t = [] - openers = {"[", "{"} - closers = {"]", "}"} - nesting = 0 - - drive_part = WINDOWS_DRIVE_MATCHER.match(s) if ON_WINDOWS else None - - if drive_part: - drive_part = drive_part.group(0) - t.append(drive_part) - s = s[len(drive_part) :] - - for c in s: - if c in openers: - nesting += 1 - elif c in closers: - nesting -= 1 - elif nesting > 0: - pass - elif c.isalpha(): - folded = c.casefold() - if len(folded) == 1: - c = "[{0}{1}]".format(c.upper(), c.lower()) - else: - newc = ["[{0}{1}]?".format(f.upper(), f.lower()) for f in folded[:-1]] - newc = "".join(newc) - newc += "[{0}{1}{2}]".format(folded[-1].upper(), folded[-1].lower(), c) - c = newc - t.append(c) - return "".join(t) - - -def globpath( - s, ignore_case=False, return_empty=False, sort_result=None, include_dotfiles=None -): - """Simple wrapper around glob that also expands home and env vars.""" - o, s = _iglobpath( - s, - ignore_case=ignore_case, - sort_result=sort_result, - include_dotfiles=include_dotfiles, - ) - o = list(o) - no_match = [] if return_empty else [s] - return o if len(o) != 0 else no_match - - -def _dotglobstr(s): - modified = False - dotted_s = s - if "/*" in dotted_s: - dotted_s = dotted_s.replace("/*", "/.*") - dotted_s = dotted_s.replace("/.**/.*", "/**/.*") - modified = True - if dotted_s.startswith("*") and not dotted_s.startswith("**"): - dotted_s = "." + dotted_s - modified = True - return dotted_s, modified - - -def _iglobpath(s, ignore_case=False, sort_result=None, include_dotfiles=None): - s = builtins.__xonsh__.expand_path(s) - if sort_result is None: - sort_result = builtins.__xonsh__.env.get("GLOB_SORTED") - if include_dotfiles is None: - include_dotfiles = builtins.__xonsh__.env.get("DOTGLOB") - if ignore_case: - s = expand_case_matching(s) - if sys.version_info > (3, 5): - if "**" in s and "**/*" not in s: - s = s.replace("**", "**/*") - if include_dotfiles: - dotted_s, dotmodified = _dotglobstr(s) - # `recursive` is only a 3.5+ kwarg. 
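# Editorial note on expand_case_matching() above: each alphabetic character
# becomes a two-character character class, so a single glob pattern matches
# either case, e.g.
#   expand_case_matching("abc") -> "[Aa][Bb][Cc]"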
- if sort_result: - paths = glob.glob(s, recursive=True) - if include_dotfiles and dotmodified: - paths.extend(glob.iglob(dotted_s, recursive=True)) - paths.sort() - paths = iter(paths) - else: - paths = glob.iglob(s, recursive=True) - if include_dotfiles and dotmodified: - paths = itertools.chain(glob.iglob(dotted_s, recursive=True), paths) - return paths, s - else: - if include_dotfiles: - dotted_s, dotmodified = _dotglobstr(s) - if sort_result: - paths = glob.glob(s) - if include_dotfiles and dotmodified: - paths.extend(glob.iglob(dotted_s)) - paths.sort() - paths = iter(paths) - else: - paths = glob.iglob(s) - if include_dotfiles and dotmodified: - paths = itertools.chain(glob.iglob(dotted_s), paths) - return paths, s - - -def iglobpath(s, ignore_case=False, sort_result=None, include_dotfiles=None): - """Simple wrapper around iglob that also expands home and env vars.""" - try: - return _iglobpath( - s, - ignore_case=ignore_case, - sort_result=sort_result, - include_dotfiles=include_dotfiles, - )[0] - except IndexError: - # something went wrong in the actual iglob() call - return iter(()) - - -def ensure_timestamp(t, datetime_format=None): - if isinstance(t, (int, float)): - return t - try: - return float(t) - except (ValueError, TypeError): - pass - if datetime_format is None: - datetime_format = builtins.__xonsh__.env["XONSH_DATETIME_FORMAT"] - if isinstance(t, datetime.datetime): - t = t.timestamp() - else: - t = datetime.datetime.strptime(t, datetime_format).timestamp() - return t - - -def format_datetime(dt): - """Format datetime object to string base on $XONSH_DATETIME_FORMAT Env.""" - format_ = builtins.__xonsh__.env["XONSH_DATETIME_FORMAT"] - return dt.strftime(format_) - - -def columnize(elems, width=80, newline="\n"): - """Takes an iterable of strings and returns a list of lines with the - elements placed in columns. Each line will be at most *width* columns. - The newline character will be appended to the end of each line. + * iterating over instance of this class is the same as iterating over + the single char - prefix and suffix are ommited. """ - sizes = [len(e) + 1 for e in elems] - total = sum(sizes) - nelem = len(elems) - if total - 1 <= width: - ncols = len(sizes) - nrows = 1 - columns = [sizes] - last_longest_row = total - enter_loop = False - else: - ncols = 1 - nrows = len(sizes) - columns = [sizes] - last_longest_row = max(sizes) - enter_loop = True - while enter_loop: - longest_row = sum(map(max, columns)) - if longest_row - 1 <= width: - # we might be able to fit another column. - ncols += 1 - nrows = nelem // ncols - columns = [sizes[i * nrows : (i + 1) * nrows] for i in range(ncols)] - last_longest_row = longest_row - else: - # we can't fit another column - ncols -= 1 - nrows = nelem // ncols - break - pad = (width - last_longest_row + ncols) // ncols - pad = pad if pad > 1 else 1 - data = [elems[i * nrows : (i + 1) * nrows] for i in range(ncols)] - colwidths = [max(map(len, d)) + pad for d in data] - colwidths[-1] -= pad - row_t = "".join(["{{row[{i}]: <{{w[{i}]}}}}".format(i=i) for i in range(ncols)]) - row_t += newline - lines = [ - row_t.format(row=row, w=colwidths) - for row in itertools.zip_longest(*data, fillvalue="") - ] - return lines - - -ALIAS_KWARG_NAMES = frozenset(["args", "stdin", "stdout", "stderr", "spec", "stack"]) - - -def unthreadable(f): - """Decorator that specifies that a callable alias should be run only - on the main thread process. This is often needed for debuggers and - profilers. 
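# A minimal, independent sketch of the column-packing idea behind columnize()
# above (simplified to a fixed column count; the real function searches for
# the best count adaptively):
import itertools

def tiny_columnize(elems, ncols=2, pad=1):
    nrows = -(-len(elems) // ncols)  # ceiling division
    cols = [elems[i * nrows:(i + 1) * nrows] for i in range(ncols)]
    widths = [max(map(len, c)) + pad for c in cols if c]
    return [
        "".join(cell.ljust(w) for cell, w in zip(row, widths))
        for row in itertools.zip_longest(*cols, fillvalue="")
    ]

print("\n".join(tiny_columnize(["a", "bb", "ccc", "dddd"])))
# roughly:
#   a  ccc
#   bb dddd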
- """ - f.__xonsh_threadable__ = False - return f + def __new__(cls, char, prefix='', suffix=''): + return str.__new__(cls, prefix + char + suffix) + def __init__(self, char, prefix='', suffix=''): + self.char = char + self.prefix = prefix + self.suffix = suffix + self.length = 2 + self.iterated = False -def uncapturable(f): - """Decorator that specifies that a callable alias should not be run with - any capturing. This is often needed if the alias call interactive - subprocess, like pagers and text editors. - """ - f.__xonsh_capturable__ = False - return f - + def __len__(self): + return self.length -def carriage_return(): - """Writes a carriage return to stdout, and nothing else.""" - print("\r", flush=True, end="") + def __iter__(self): + return iter(self.char) -def deprecated(deprecated_in=None, removed_in=None): - """Parametrized decorator that deprecates a function in a graceful manner. +RE_HIDDEN_MAX = re.compile('(\001.*?\002)+') - Updates the decorated function's docstring to mention the version - that deprecation occurred in and the version it will be removed - in if both of these values are passed. - When removed_in is not a release equal to or less than the current - release, call ``warnings.warn`` with details, while raising - ``DeprecationWarning``. +_PT_COLORS = {'BLACK': '#000000', + 'RED': '#FF0000', + 'GREEN': '#008000', + 'YELLOW': '#FFFF00', + 'BLUE': '#0000FF', + 'PURPLE': '#0000FF', + 'CYAN': '#00FFFF', + 'WHITE': '#FFFFFF'} - When removed_in is a release equal to or less than the current release, - raise an ``AssertionError``. +_PT_STYLE = {'BOLD': 'bold', + 'UNDERLINE': 'underline', + 'INTENSE': 'italic'} - Parameters - ---------- - deprecated_in : str - The version number that deprecated this function. - removed_in : str - The version number that this function will be removed in. 
- """ - message_suffix = _deprecated_message_suffix(deprecated_in, removed_in) - if not message_suffix: - message_suffix = "" - - def decorated(func): - warning_message = "{} has been deprecated".format(func.__name__) - warning_message += message_suffix - - @functools.wraps(func) - def wrapped(*args, **kwargs): - _deprecated_error_on_expiration(func.__name__, removed_in) - func(*args, **kwargs) - warnings.warn(warning_message, DeprecationWarning) - - wrapped.__doc__ = ( - "{}\n\n{}".format(wrapped.__doc__, warning_message) - if wrapped.__doc__ - else warning_message - ) - - return wrapped - - return decorated - - -def _deprecated_message_suffix(deprecated_in, removed_in): - if deprecated_in and removed_in: - message_suffix = " in version {} and will be removed in version {}".format( - deprecated_in, removed_in - ) - elif deprecated_in and not removed_in: - message_suffix = " in version {}".format(deprecated_in) - elif not deprecated_in and removed_in: - message_suffix = " and will be removed in version {}".format(removed_in) - else: - message_suffix = None - return message_suffix +def _make_style(color_name): + """ Convert color names to pygments styles codes """ + style = [] + for k, v in _PT_STYLE.items(): + if k in color_name: + style.append(v) + for k, v in _PT_COLORS.items(): + if k in color_name: + style.append(v) + return ' '.join(style) -def _deprecated_error_on_expiration(name, removed_in): - if not removed_in: - return - elif LooseVersion(__version__) >= LooseVersion(removed_in): - raise AssertionError( - "{} has passed its version {} expiry date!".format(name, removed_in) - ) +def get_xonsh_color_names(color_code): + """ Makes a reverse lookup in TERM_COLORS """ + try: + return next(k for k, v in TERM_COLORS.items() if v == color_code) + except StopIteration: + return 'NO_COLOR' + + +def format_prompt_for_prompt_toolkit(prompt): + """Converts a prompt with color codes to a pygments style and tokens + """ + parts = RE_HIDDEN_MAX.split(prompt) + # ensure that parts is [colorcode, string, colorcode, string,...] + if parts and len(parts[0]) == 0: + parts = parts[1:] + else: + parts.insert(0, '') + if len(parts) % 2 != 0: + parts.append() + strings = parts[1::2] + token_names = [get_xonsh_color_names(c) for c in parts[::2]] + cstyles = [_make_style(c) for c in token_names] + return token_names, cstyles, strings diff --git a/xonsh/tracer.py b/xonsh/tracer.py deleted file mode 100644 index cae4692..0000000 --- a/xonsh/tracer.py +++ /dev/null @@ -1,240 +0,0 @@ -"""Implements a xonsh tracer.""" -import os -import re -import sys -import inspect -import argparse -import linecache -import importlib -import functools - -from xonsh.lazyasd import LazyObject -from xonsh.platform import HAS_PYGMENTS -from xonsh.tools import DefaultNotGiven, print_color, normabspath, to_bool -from xonsh.inspectors import find_file, getouterframes -from xonsh.lazyimps import pygments, pyghooks -from xonsh.proc import STDOUT_CAPTURE_KINDS -import xonsh.prompt.cwd as prompt - -terminal = LazyObject( - lambda: importlib.import_module("pygments.formatters.terminal"), - globals(), - "terminal", -) - - -class TracerType(object): - """Represents a xonsh tracer object, which keeps track of all tracing - state. This is a singleton. 
- """ - - _inst = None - valid_events = frozenset(["line", "call"]) - - def __new__(cls, *args, **kwargs): - if cls._inst is None: - cls._inst = super(TracerType, cls).__new__(cls, *args, **kwargs) - return cls._inst - - def __init__(self): - self.prev_tracer = DefaultNotGiven - self.files = set() - self.usecolor = True - self.lexer = pyghooks.XonshLexer() - self.formatter = terminal.TerminalFormatter() - self._last = ("", -1) # filename, lineno tuple - - def __del__(self): - for f in set(self.files): - self.stop(f) - - def color_output(self, usecolor): - """Specify whether or not the tracer output should be colored.""" - # we have to use a function to set usecolor because of the way that - # lazyasd works. Namely, it cannot dispatch setattr to the target - # object without being unable to access its own __dict__. This makes - # setting an attr look like getting a function. - self.usecolor = usecolor - - def start(self, filename): - """Starts tracing a file.""" - files = self.files - if len(files) == 0: - self.prev_tracer = sys.gettrace() - files.add(normabspath(filename)) - sys.settrace(self.trace) - curr = inspect.currentframe() - for frame, fname, *_ in getouterframes(curr, context=0): - if normabspath(fname) in files: - frame.f_trace = self.trace - - def stop(self, filename): - """Stops tracing a file.""" - filename = normabspath(filename) - self.files.discard(filename) - if len(self.files) == 0: - sys.settrace(self.prev_tracer) - curr = inspect.currentframe() - for frame, fname, *_ in getouterframes(curr, context=0): - if normabspath(fname) == filename: - frame.f_trace = self.prev_tracer - self.prev_tracer = DefaultNotGiven - - def trace(self, frame, event, arg): - """Implements a line tracing function.""" - if event not in self.valid_events: - return self.trace - fname = find_file(frame) - if fname in self.files: - lineno = frame.f_lineno - curr = (fname, lineno) - if curr != self._last: - line = linecache.getline(fname, lineno).rstrip() - s = tracer_format_line( - fname, - lineno, - line, - color=self.usecolor, - lexer=self.lexer, - formatter=self.formatter, - ) - print_color(s) - self._last = curr - return self.trace - - -tracer = LazyObject(TracerType, globals(), "tracer") - -COLORLESS_LINE = "{fname}:{lineno}:{line}" -COLOR_LINE = "{{PURPLE}}{fname}{{BLUE}}:" "{{GREEN}}{lineno}{{BLUE}}:" "{{NO_COLOR}}" - - -def tracer_format_line(fname, lineno, line, color=True, lexer=None, formatter=None): - """Formats a trace line suitable for printing.""" - fname = min(fname, prompt._replace_home(fname), os.path.relpath(fname), key=len) - if not color: - return COLORLESS_LINE.format(fname=fname, lineno=lineno, line=line) - cline = COLOR_LINE.format(fname=fname, lineno=lineno) - if not HAS_PYGMENTS: - return cline + line - # OK, so we have pygments - tokens = pyghooks.partial_color_tokenize(cline) - lexer = lexer or pyghooks.XonshLexer() - tokens += pygments.lex(line, lexer=lexer) - if tokens[-1][1] == "\n": - del tokens[-1] - elif tokens[-1][1].endswith("\n"): - tokens[-1] = (tokens[-1][0], tokens[-1][1].rstrip()) - return tokens - - -# -# Command line interface -# - - -def _find_caller(args): - """Somewhat hacky method of finding the __file__ based on the line executed.""" - re_line = re.compile(r"[^;\s|&<>]+\s+" + r"\s+".join(args)) - curr = inspect.currentframe() - for _, fname, lineno, _, lines, _ in getouterframes(curr, context=1)[3:]: - if lines is not None and re_line.search(lines[0]) is not None: - return fname - elif ( - lineno == 1 and re_line.search(linecache.getline(fname, lineno)) 
is not None - ): - # There is a bug in CPython such that getouterframes(curr, context=1) - # will actually return the 2nd line in the code_context field, even though - # line number is itself correct. We manually fix that in this branch. - return fname - else: - msg = ( - "xonsh: warning: __file__ name could not be found. You may be " - "trying to trace interactively. Please pass in the file names " - "you want to trace explicitly." - ) - print(msg, file=sys.stderr) - - -def _on(ns, args): - """Turns on tracing for files.""" - for f in ns.files: - if f == "__file__": - f = _find_caller(args) - if f is None: - continue - tracer.start(f) - - -def _off(ns, args): - """Turns off tracing for files.""" - for f in ns.files: - if f == "__file__": - f = _find_caller(args) - if f is None: - continue - tracer.stop(f) - - -def _color(ns, args): - """Manages color action for tracer CLI.""" - tracer.color_output(ns.toggle) - - -@functools.lru_cache(1) -def _tracer_create_parser(): - """Creates tracer argument parser""" - p = argparse.ArgumentParser( - prog="trace", description="tool for tracing xonsh code as it runs." - ) - subp = p.add_subparsers(title="action", dest="action") - onp = subp.add_parser( - "on", aliases=["start", "add"], help="begins tracing selected files." - ) - onp.add_argument( - "files", - nargs="*", - default=["__file__"], - help=( - 'file paths to watch, use "__file__" (default) to select ' - "the current file." - ), - ) - off = subp.add_parser( - "off", aliases=["stop", "del", "rm"], help="removes selected files fom tracing." - ) - off.add_argument( - "files", - nargs="*", - default=["__file__"], - help=( - 'file paths to stop watching, use "__file__" (default) to ' - "select the current file." - ), - ) - col = subp.add_parser("color", help="output color management for tracer.") - col.add_argument( - "toggle", type=to_bool, help="true/false, y/n, etc. to toggle color usage." - ) - return p - - -_TRACER_MAIN_ACTIONS = { - "on": _on, - "add": _on, - "start": _on, - "rm": _off, - "off": _off, - "del": _off, - "stop": _off, - "color": _color, -} - - -def tracermain(args=None, stdin=None, stdout=None, stderr=None, spec=None): - """Main function for tracer command-line interface.""" - parser = _tracer_create_parser() - ns = parser.parse_args(args) - usecolor = (spec.captured not in STDOUT_CAPTURE_KINDS) and sys.stdout.isatty() - tracer.color_output(usecolor) - return _TRACER_MAIN_ACTIONS[ns.action](ns, args) diff --git a/xonsh/winutils.py b/xonsh/winutils.py deleted file mode 100644 index 3930cb5..0000000 --- a/xonsh/winutils.py +++ /dev/null @@ -1,549 +0,0 @@ -""" -This file is based on the code from https://github.com/JustAMan/pyWinClobber/blob/master/win32elevate.py - -Copyright (c) 2013 by JustAMan at GitHub - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -""" -import os -import ctypes -import subprocess -from ctypes import c_ulong, c_char_p, c_int, c_void_p, POINTER, byref -from ctypes.wintypes import ( - HANDLE, - BOOL, - DWORD, - HWND, - HINSTANCE, - HKEY, - LPDWORD, - SHORT, - LPCWSTR, - WORD, - SMALL_RECT, - LPCSTR, -) - -from xonsh.lazyasd import lazyobject -from xonsh import lazyimps # we aren't amalgamated in this module. -from xonsh import platform - - -__all__ = ("sudo",) - - -@lazyobject -def CloseHandle(): - ch = ctypes.windll.kernel32.CloseHandle - ch.argtypes = (HANDLE,) - ch.restype = BOOL - return ch - - -@lazyobject -def GetActiveWindow(): - gaw = ctypes.windll.user32.GetActiveWindow - gaw.argtypes = () - gaw.restype = HANDLE - return gaw - - -TOKEN_READ = 0x20008 - - -class ShellExecuteInfo(ctypes.Structure): - _fields_ = [ - ("cbSize", DWORD), - ("fMask", c_ulong), - ("hwnd", HWND), - ("lpVerb", c_char_p), - ("lpFile", c_char_p), - ("lpParameters", c_char_p), - ("lpDirectory", c_char_p), - ("nShow", c_int), - ("hInstApp", HINSTANCE), - ("lpIDList", c_void_p), - ("lpClass", c_char_p), - ("hKeyClass", HKEY), - ("dwHotKey", DWORD), - ("hIcon", HANDLE), - ("hProcess", HANDLE), - ] - - def __init__(self, **kw): - ctypes.Structure.__init__(self) - self.cbSize = ctypes.sizeof(self) - for field_name, field_value in kw.items(): - setattr(self, field_name, field_value) - - -@lazyobject -def ShellExecuteEx(): - see = ctypes.windll.Shell32.ShellExecuteExA - PShellExecuteInfo = ctypes.POINTER(ShellExecuteInfo) - see.argtypes = (PShellExecuteInfo,) - see.restype = BOOL - return see - - -@lazyobject -def WaitForSingleObject(): - wfso = ctypes.windll.kernel32.WaitForSingleObject - wfso.argtypes = (HANDLE, DWORD) - wfso.restype = DWORD - return wfso - - -# SW_HIDE = 0 -SW_SHOW = 5 -SEE_MASK_NOCLOSEPROCESS = 0x00000040 -SEE_MASK_NO_CONSOLE = 0x00008000 -INFINITE = -1 - - -def wait_and_close_handle(process_handle): - """ - Waits till spawned process finishes and closes the handle for it - - Parameters - ---------- - process_handle : HANDLE - The Windows handle for the process - """ - WaitForSingleObject(process_handle, INFINITE) - CloseHandle(process_handle) - - -def sudo(executable, args=None): - """ - This will re-run current Python script requesting to elevate administrative rights. 
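import ctypes
from ctypes.wintypes import HANDLE, BOOL

# The ctypes prototyping pattern used throughout this file, shown standalone
# (Windows-only; mirrors the CloseHandle wrapper above): resolve the symbol,
# then pin down argtypes/restype so calls are type-checked.
def make_close_handle():
    ch = ctypes.windll.kernel32.CloseHandle
    ch.argtypes = (HANDLE,)
    ch.restype = BOOL
    return ch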
- - Parameters - ---------- - param executable : str - The path/name of the executable - args : list of str - The arguments to be passed to the executable - """ - if not args: - args = [] - - execute_info = ShellExecuteInfo( - fMask=SEE_MASK_NOCLOSEPROCESS | SEE_MASK_NO_CONSOLE, - hwnd=GetActiveWindow(), - lpVerb=b"runas", - lpFile=executable.encode("utf-8"), - lpParameters=subprocess.list2cmdline(args).encode("utf-8"), - lpDirectory=None, - nShow=SW_SHOW, - ) - - if not ShellExecuteEx(byref(execute_info)): - raise ctypes.WinError() - - wait_and_close_handle(execute_info.hProcess) - - -# -# The following has been refactored from -# http://stackoverflow.com/a/37505496/2312428 -# - -# input flags -ENABLE_PROCESSED_INPUT = 0x0001 -ENABLE_LINE_INPUT = 0x0002 -ENABLE_ECHO_INPUT = 0x0004 -ENABLE_WINDOW_INPUT = 0x0008 -ENABLE_MOUSE_INPUT = 0x0010 -ENABLE_INSERT_MODE = 0x0020 -ENABLE_QUICK_EDIT_MODE = 0x0040 - -# output flags -ENABLE_PROCESSED_OUTPUT = 0x0001 -ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 -ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004 # VT100 (Win 10) - - -def check_zero(result, func, args): - if not result: - err = ctypes.get_last_error() - if err: - raise ctypes.WinError(err) - return args - - -@lazyobject -def GetStdHandle(): - return lazyimps._winapi.GetStdHandle - - -@lazyobject -def STDHANDLES(): - """Tuple of the Windows handles for (stdin, stdout, stderr).""" - hs = [ - lazyimps._winapi.STD_INPUT_HANDLE, - lazyimps._winapi.STD_OUTPUT_HANDLE, - lazyimps._winapi.STD_ERROR_HANDLE, - ] - hcons = [] - for h in hs: - hcon = GetStdHandle(int(h)) - hcons.append(hcon) - return tuple(hcons) - - -@lazyobject -def GetConsoleMode(): - gcm = ctypes.windll.kernel32.GetConsoleMode - gcm.errcheck = check_zero - gcm.argtypes = (HANDLE, LPDWORD) # _In_ hConsoleHandle # _Out_ lpMode - return gcm - - -def get_console_mode(fd=1): - """Get the mode of the active console input, output, or error - buffer. Note that if the process isn't attached to a - console, this function raises an EBADF IOError. - - Parameters - ---------- - fd : int - Standard buffer file descriptor, 0 for stdin, 1 for stdout (default), - and 2 for stderr - """ - mode = DWORD() - hcon = STDHANDLES[fd] - GetConsoleMode(hcon, byref(mode)) - return mode.value - - -@lazyobject -def SetConsoleMode(): - scm = ctypes.windll.kernel32.SetConsoleMode - scm.errcheck = check_zero - scm.argtypes = (HANDLE, DWORD) # _In_ hConsoleHandle # _Out_ lpMode - return scm - - -def set_console_mode(mode, fd=1): - """Set the mode of the active console input, output, or - error buffer. Note that if the process isn't attached to a - console, this function raises an EBADF IOError. - - Parameters - ---------- - mode : int - Mode flags to set on the handle. - fd : int, optional - Standard buffer file descriptor, 0 for stdin, 1 for stdout (default), - and 2 for stderr - """ - hcon = STDHANDLES[fd] - SetConsoleMode(hcon, mode) - - -def enable_virtual_terminal_processing(): - """Enables virtual terminal processing on Windows. - This includes ANSI escape sequence interpretation. - See http://stackoverflow.com/a/36760881/2312428 - """ - SetConsoleMode(GetStdHandle(-11), 7) - - -@lazyobject -def COORD(): - if platform.has_prompt_toolkit(): - # turns out that PTK has a separate ctype wrapper - # for this struct and also wraps similar function calls - # we need to use the same struct to prevent clashes. 
- import prompt_toolkit.win32_types - - return prompt_toolkit.win32_types.COORD - - class _COORD(ctypes.Structure): - """Struct from the winapi, representing coordinates in the console. - - Attributes - ---------- - X : int - Column position - Y : int - Row position - """ - - _fields_ = [("X", SHORT), ("Y", SHORT)] - - return _COORD - - -@lazyobject -def ReadConsoleOutputCharacterA(): - rcoc = ctypes.windll.kernel32.ReadConsoleOutputCharacterA - rcoc.errcheck = check_zero - rcoc.argtypes = ( - HANDLE, # _In_ hConsoleOutput - LPCSTR, # _Out_ LPTSTR lpMode - DWORD, # _In_ nLength - COORD, # _In_ dwReadCoord, - LPDWORD, - ) # _Out_ lpNumberOfCharsRead - rcoc.restype = BOOL - return rcoc - - -@lazyobject -def ReadConsoleOutputCharacterW(): - rcoc = ctypes.windll.kernel32.ReadConsoleOutputCharacterW - rcoc.errcheck = check_zero - rcoc.argtypes = ( - HANDLE, # _In_ hConsoleOutput - LPCWSTR, # _Out_ LPTSTR lpMode - DWORD, # _In_ nLength - COORD, # _In_ dwReadCoord, - LPDWORD, - ) # _Out_ lpNumberOfCharsRead - rcoc.restype = BOOL - return rcoc - - -def read_console_output_character(x=0, y=0, fd=1, buf=None, bufsize=1024, raw=False): - """Reads characters from the console buffer. - - Parameters - ---------- - x : int, optional - Starting column. - y : int, optional - Starting row. - fd : int, optional - Standard buffer file descriptor, 0 for stdin, 1 for stdout (default), - and 2 for stderr. - buf : ctypes.c_wchar_p if raw else ctypes.c_wchar_p, optional - An existing buffer to (re-)use. - bufsize : int, optional - The maximum read size. - raw : bool, optional - Whether to read in and return as bytes (True) or as a - unicode string (False, default). - - Returns - ------- - value : str - Result of what was read, may be shorter than bufsize. - """ - hcon = STDHANDLES[fd] - if buf is None: - if raw: - buf = ctypes.c_char_p(b" " * bufsize) - else: - buf = ctypes.c_wchar_p(" " * bufsize) - coord = COORD(x, y) - n = DWORD() - if raw: - ReadConsoleOutputCharacterA(hcon, buf, bufsize, coord, byref(n)) - else: - ReadConsoleOutputCharacterW(hcon, buf, bufsize, coord, byref(n)) - return buf.value[: n.value] - - -def pread_console(fd, buffersize, offset, buf=None): - """This is a console-based implementation of os.pread() for windows. - that uses read_console_output_character(). - """ - cols, rows = os.get_terminal_size(fd=fd) - x = offset % cols - y = offset // cols - return read_console_output_character( - x=x, y=y, fd=fd, buf=buf, bufsize=buffersize, raw=True - ) - - -# -# The following piece has been forked from colorama.win32 -# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file. -# - - -@lazyobject -def CONSOLE_SCREEN_BUFFER_INFO(): - if platform.has_prompt_toolkit(): - # turns out that PTK has a separate ctype wrapper - # for this struct and also wraps kernel32.GetConsoleScreenBufferInfo - # we need to use the same struct to prevent clashes. - import prompt_toolkit.win32_types - - return prompt_toolkit.win32_types.CONSOLE_SCREEN_BUFFER_INFO - - # Otherwise we should wrap it ourselves - COORD() # force COORD to load - - class _CONSOLE_SCREEN_BUFFER_INFO(ctypes.Structure): - """Struct from in wincon.h. See Windows API docs - for more details. - - Attributes - ---------- - dwSize : COORD - Size of - dwCursorPosition : COORD - Current cursor location. - wAttributes : WORD - Flags for screen buffer. - srWindow : SMALL_RECT - Actual size of screen - dwMaximumWindowSize : COORD - Maximum window scrollback size. 
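# The linear-offset arithmetic pread_console() above relies on, as a
# standalone check: a console offset is split into (column, row) using the
# terminal width.
cols = 80
offset = 163
x, y = offset % cols, offset // cols
assert (x, y) == (3, 2)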
- """ - - _fields_ = [ - ("dwSize", COORD), - ("dwCursorPosition", COORD), - ("wAttributes", WORD), - ("srWindow", SMALL_RECT), - ("dwMaximumWindowSize", COORD), - ] - - return _CONSOLE_SCREEN_BUFFER_INFO - - -@lazyobject -def GetConsoleScreenBufferInfo(): - """Returns the windows version of the get screen buffer.""" - gcsbi = ctypes.windll.kernel32.GetConsoleScreenBufferInfo - gcsbi.errcheck = check_zero - gcsbi.argtypes = (HANDLE, POINTER(CONSOLE_SCREEN_BUFFER_INFO)) - gcsbi.restype = BOOL - return gcsbi - - -def get_console_screen_buffer_info(fd=1): - """Returns an screen buffer info object for the relevant stdbuf. - - Parameters - ---------- - fd : int, optional - Standard buffer file descriptor, 0 for stdin, 1 for stdout (default), - and 2 for stderr. - - Returns - ------- - csbi : CONSOLE_SCREEN_BUFFER_INFO - Information about the console screen buffer. - """ - hcon = STDHANDLES[fd] - csbi = CONSOLE_SCREEN_BUFFER_INFO() - GetConsoleScreenBufferInfo(hcon, byref(csbi)) - return csbi - - -# -# end colorama forked section -# - - -def get_cursor_position(fd=1): - """Gets the current cursor position as an (x, y) tuple.""" - csbi = get_console_screen_buffer_info(fd=fd) - coord = csbi.dwCursorPosition - return (coord.X, coord.Y) - - -def get_cursor_offset(fd=1): - """Gets the current cursor position as a total offset value.""" - csbi = get_console_screen_buffer_info(fd=fd) - pos = csbi.dwCursorPosition - size = csbi.dwSize - return (pos.Y * size.X) + pos.X - - -def get_position_size(fd=1): - """Gets the current cursor position and screen size tuple: - (x, y, columns, lines). - """ - info = get_console_screen_buffer_info(fd) - return ( - info.dwCursorPosition.X, - info.dwCursorPosition.Y, - info.dwSize.X, - info.dwSize.Y, - ) - - -@lazyobject -def SetConsoleScreenBufferSize(): - """Set screen buffer dimensions.""" - scsbs = ctypes.windll.kernel32.SetConsoleScreenBufferSize - scsbs.errcheck = check_zero - scsbs.argtypes = (HANDLE, COORD) # _In_ HANDLE hConsoleOutput # _In_ COORD dwSize - scsbs.restype = BOOL - return scsbs - - -def set_console_screen_buffer_size(x, y, fd=1): - """Sets the console size for a standard buffer. - - Parameters - ---------- - x : int - Number of columns. - y : int - Number of rows. - fd : int, optional - Standard buffer file descriptor, 0 for stdin, 1 for stdout (default), - and 2 for stderr. - """ - coord = COORD() - coord.X = x - coord.Y = y - hcon = STDHANDLES[fd] - rtn = SetConsoleScreenBufferSize(hcon, coord) - return rtn - - -@lazyobject -def SetConsoleCursorPosition(): - """Set cursor position in console.""" - sccp = ctypes.windll.kernel32.SetConsoleCursorPosition - sccp.errcheck = check_zero - sccp.argtypes = ( - HANDLE, # _In_ HANDLE hConsoleOutput - COORD, # _In_ COORD dwCursorPosition - ) - sccp.restype = BOOL - return sccp - - -def set_console_cursor_position(x, y, fd=1): - """Sets the console cursor position for a standard buffer. - - Parameters - ---------- - x : int - Number of columns. - y : int - Number of rows. - fd : int, optional - Standard buffer file descriptor, 0 for stdin, 1 for stdout (default), - and 2 for stderr. - """ - coord = COORD() - coord.X = x - coord.Y = y - hcon = STDHANDLES[fd] - rtn = SetConsoleCursorPosition(hcon, coord) - return rtn diff --git a/xonsh/wizard.py b/xonsh/wizard.py deleted file mode 100644 index 0f6fb46..0000000 --- a/xonsh/wizard.py +++ /dev/null @@ -1,869 +0,0 @@ -"""Tools for creating command-line and web-based wizards from a tree of nodes. 
-""" -import os -import re -import ast -import json -import pprint -import fnmatch -import builtins -import textwrap -import collections.abc as cabc - -from xonsh.tools import to_bool, to_bool_or_break, backup_file, print_color -from xonsh.jsonutils import serialize_xonsh_json - - -# -# Nodes themselves -# -class Node(object): - """Base type of all nodes.""" - - attrs = () - - def __str__(self): - return PrettyFormatter(self).visit() - - def __repr__(self): - return str(self).replace("\n", "") - - -class Wizard(Node): - """Top-level node in the tree.""" - - attrs = ("children", "path") - - def __init__(self, children, path=None): - self.children = children - self.path = path - - -class Pass(Node): - """Simple do-nothing node""" - - -class Message(Node): - """Contains a simple message to report to the user.""" - - attrs = "message" - - def __init__(self, message): - self.message = message - - -class Question(Node): - """Asks a question and then chooses the next node based on the response. - """ - - attrs = ("question", "responses", "converter", "path") - - def __init__(self, question, responses, converter=None, path=None): - """ - Parameters - ---------- - question : str - The question itself. - responses : dict with str keys and Node values - Mapping from user-input responses to nodes. - converter : callable, optional - Converts the string the user typed into another object - that serves as a key to the responses dict. - path : str or sequence of str, optional - A path within the storage object. - """ - self.question = question - self.responses = responses - self.converter = converter - self.path = path - - -class Input(Node): - """Gets input from the user.""" - - attrs = ("prompt", "converter", "show_conversion", "confirm", "path") - - def __init__( - self, - prompt=">>> ", - converter=None, - show_conversion=False, - confirm=False, - retry=False, - path=None, - ): - """ - Parameters - ---------- - prompt : str, optional - Prompt string prior to input - converter : callable, optional - Converts the string the user typed into another object - prior to storage. - show_conversion : bool, optional - Flag for whether or not to show the results of the conversion - function if the conversion function was meaningfully executed. - Default False. - confirm : bool, optional - Whether the input should be confirmed until true or broken, - default False. - retry : bool, optional - In the event that the conversion operation fails, should - users be re-prompted until they provide valid input. Default False. - path : str or sequence of str, optional - A path within the storage object. - """ - self.prompt = prompt - self.converter = converter - self.show_conversion = show_conversion - self.confirm = confirm - self.retry = retry - self.path = path - - -class While(Node): - """Computes a body while a condition function evaluates to true. - - The condition function has the form ``cond(visitor=None, node=None)`` and - must return an object that responds to the Python magic method ``__bool__``. - The beg attribute specifies the number to start the loop iteration at. - """ - - attrs = ("cond", "body", "idxname", "beg", "path") - - def __init__(self, cond, body, idxname="idx", beg=0, path=None): - """ - Parameters - ---------- - cond : callable - Function that determines if the next loop iteration should - be executed. - body : sequence of nodes - A list of node to execute on each iteration. 
The condition function - has the form ``cond(visitor=None, node=None)`` and must return an - object that responds to the Python magic method ``__bool__``. - idxname : str, optional - The variable name for the index. - beg : int, optional - The first index value when evaluating path format strings. - path : str or sequence of str, optional - A path within the storage object. - """ - self.cond = cond - self.body = body - self.idxname = idxname - self.beg = beg - self.path = path - - -# -# Helper nodes -# - - -class YesNo(Question): - """Represents a simple yes/no question.""" - - def __init__(self, question, yes, no, path=None): - """ - Parameters - ---------- - question : str - The question itself. - yes : Node - Node to execute if the response is True. - no : Node - Node to execute if the response is False. - path : str or sequence of str, optional - A path within the storage object. - """ - responses = {True: yes, False: no} - super().__init__(question, responses, converter=to_bool, path=path) - - -class TrueFalse(Input): - """Input node the returns a True or False value.""" - - def __init__(self, prompt="yes or no [default: no]? ", path=None): - super().__init__( - prompt=prompt, - converter=to_bool, - show_conversion=False, - confirm=False, - path=path, - ) - - -class TrueFalseBreak(Input): - """Input node the returns a True, False, or 'break' value.""" - - def __init__(self, prompt="yes, no, or break [default: no]? ", path=None): - super().__init__( - prompt=prompt, - converter=to_bool_or_break, - show_conversion=False, - confirm=False, - path=path, - ) - - -class StoreNonEmpty(Input): - """Stores the user input only if the input was not an empty string. - This works by wrapping the converter function. - """ - - def __init__( - self, - prompt=">>> ", - converter=None, - show_conversion=False, - confirm=False, - retry=False, - path=None, - store_raw=False, - ): - def nonempty_converter(x): - """Converts non-empty values and converts empty inputs to - Unstorable. - """ - if len(x) == 0: - x = Unstorable - elif converter is None: - pass - elif store_raw: - converter(x) # make sure str is valid, even if storing raw - else: - x = converter(x) - return x - - super().__init__( - prompt=prompt, - converter=nonempty_converter, - show_conversion=show_conversion, - confirm=confirm, - path=path, - retry=retry, - ) - - -class StateFile(Input): - """Node for representing the state as a file under a default or user - given file name. This node type is likely not useful on its own. - """ - - attrs = ("default_file", "check", "ask_filename") - - def __init__(self, default_file=None, check=True, ask_filename=True): - """ - Parameters - ---------- - default_file : str, optional - The default filename to save the file as. - check : bool, optional - Whether to print the current state and ask if it should be - saved/loaded prior to asking for the file name and saving the - file, default=True. 
- ask_filename : bool, optional - Whether to ask for the filename (if ``False``, always use the - default filename) - """ - self._df = None - super().__init__(prompt="filename: ", converter=None, confirm=False, path=None) - self.ask_filename = ask_filename - self.default_file = default_file - self.check = check - - @property - def default_file(self): - return self._df - - @default_file.setter - def default_file(self, val): - self._df = val - if val is None: - self.prompt = "filename: " - else: - self.prompt = "filename [default={0!r}]: ".format(val) - - -class SaveJSON(StateFile): - """Node for saving the state as a JSON file under a default or user - given file name. - """ - - -class LoadJSON(StateFile): - """Node for loading the state as a JSON file under a default or user - given file name. - """ - - -class FileInserter(StateFile): - """Node for inserting the state into a file in between a prefix and suffix. - The state is converted according to some dumper rules. - """ - - attrs = ("prefix", "suffix", "dump_rules", "default_file", "check", "ask_filename") - - def __init__( - self, - prefix, - suffix, - dump_rules, - default_file=None, - check=True, - ask_filename=True, - ): - """ - Parameters - ---------- - prefix : str - Starting unique string in file to find and begin the insertion at, - e.g. '# XONSH WIZARD START\n' - suffix : str - Ending unique string to find in the file and end the replacement at, - e.g. '\n# XONSH WIZARD END' - dump_rules : dict of strs to functions - This is a dictionary that maps the path-like match strings to functions - that take the flat path and the value as arguments and convert the state - value at a path to a string. The keys here may use wildcards (as seen in - the standard library fnmatch module). For example:: - - dump_rules = { - '/path/to/exact': lambda path, x: str(x), - '/otherpath/*': lambda path, x: x, - '*ending': lambda path x: repr(x), - '/': None, - } - - If a wildcard is not used in a path, then that rule will be used - used on an exact match. If wildcards are used, the deepest and longest - match is used. If None is given instead of a the function, it means to - skip generating that key. - default_file : str, optional - The default filename to save the file as. - check : bool, optional - Whether to print the current state and ask if it should be - saved/loaded prior to asking for the file name and saving the - file, default=True. - ask_filename : bool, optional - Whether to ask for the filename (if ``False``, always use the - default filename) - """ - self._dr = None - super().__init__( - default_file=default_file, check=check, ask_filename=ask_filename - ) - self.prefix = prefix - self.suffix = suffix - self.dump_rules = self.string_rules = dump_rules - - @property - def dump_rules(self): - return self._dr - - @dump_rules.setter - def dump_rules(self, value): - dr = {} - for key, func in value.items(): - key_trans = fnmatch.translate(key) - r = re.compile(key_trans) - dr[r] = func - self._dr = dr - - @staticmethod - def _find_rule_key(x): - """Key function for sorting regular expression rules""" - return (x[0], len(x[1].pattern)) - - def find_rule(self, path): - """For a path, find the key and conversion function that should be used to - dump a value. 
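import fnmatch
import re

# Standalone illustration of how the dump_rules setter above compiles its
# wildcard keys: fnmatch patterns become anchored regular expressions.
rule = re.compile(fnmatch.translate("/env/*"))
assert rule.match("/env/PATH")
assert not rule.match("/xontribs/0")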
- """ - if path in self.string_rules: - return path, self.string_rules[path] - len_funcs = [] - for rule, func in self.dump_rules.items(): - m = rule.match(path) - if m is None: - continue - i, j = m.span() - len_funcs.append((j - i, rule, func)) - if len(len_funcs) == 0: - # No dump rule function for path - return path, None - len_funcs.sort(reverse=True, key=self._find_rule_key) - _, rule, func = len_funcs[0] - return rule, func - - def dumps(self, flat): - """Dumps a flat mapping of (string path keys, values) pairs and returns - a formatted string. - """ - lines = [self.prefix] - for path, value in sorted(flat.items()): - rule, func = self.find_rule(path) - if func is None: - continue - line = func(path, value) - lines.append(line) - lines.append(self.suffix) - new = "\n".join(lines) + "\n" - return new - - -def create_truefalse_cond(prompt="yes or no [default: no]? ", path=None): - """This creates a basic condition function for use with nodes like While - or other conditions. The condition function creates and visits a TrueFalse - node and returns the result. This TrueFalse node takes the prompt and - path that is passed in here. - """ - - def truefalse_cond(visitor, node=None): - """Prompts the user for a true/false condition.""" - tf = TrueFalse(prompt=prompt, path=path) - rtn = visitor.visit(tf) - return rtn - - return truefalse_cond - - -# -# Tools for trees of nodes. -# - - -def _lowername(cls): - return cls.__name__.lower() - - -class Visitor(object): - """Super-class for all classes that should walk over a tree of nodes. - This implements the visit() method. - """ - - def __init__(self, tree=None): - self.tree = tree - - def visit(self, node=None): - """Walks over a node. If no node is provided, the tree is used.""" - if node is None: - node = self.tree - if node is None: - raise RuntimeError("no node or tree given!") - for clsname in map(_lowername, type.mro(node.__class__)): - meth = getattr(self, "visit_" + clsname, None) - if callable(meth): - rtn = meth(node) - break - else: - msg = "could not find valid visitor method for {0} on {1}" - nodename = node.__class__.__name__ - selfname = self.__class__.__name__ - raise AttributeError(msg.format(nodename, selfname)) - return rtn - - -class PrettyFormatter(Visitor): - """Formats a tree of nodes into a pretty string""" - - def __init__(self, tree=None, indent=" "): - super().__init__(tree=tree) - self.level = 0 - self.indent = indent - - def visit_node(self, node): - s = node.__class__.__name__ + "(" - if len(node.attrs) == 0: - return s + ")" - s += "\n" - self.level += 1 - t = [] - for aname in node.attrs: - a = getattr(node, aname) - t.append(self.visit(a) if isinstance(a, Node) else pprint.pformat(a)) - t = ["{0}={1}".format(n, x) for n, x in zip(node.attrs, t)] - s += textwrap.indent(",\n".join(t), self.indent) - self.level -= 1 - s += "\n)" - return s - - def visit_wizard(self, node): - s = "Wizard(children=[" - if len(node.children) == 0: - if node.path is None: - return s + "])" - else: - return s + "], path={0!r})".format(node.path) - s += "\n" - self.level += 1 - s += textwrap.indent(",\n".join(map(self.visit, node.children)), self.indent) - self.level -= 1 - if node.path is None: - s += "\n])" - else: - s += "{0}],\n{0}path={1!r}\n)".format(self.indent, node.path) - return s - - def visit_message(self, node): - return "Message({0!r})".format(node.message) - - def visit_question(self, node): - s = node.__class__.__name__ + "(\n" - self.level += 1 - s += self.indent + "question={0!r},\n".format(node.question) - s += 
self.indent + "responses={" - if len(node.responses) == 0: - s += "}" - else: - s += "\n" - t = sorted(node.responses.items()) - t = ["{0!r}: {1}".format(k, self.visit(v)) for k, v in t] - s += textwrap.indent(",\n".join(t), 2 * self.indent) - s += "\n" + self.indent + "}" - if node.converter is not None: - s += ",\n" + self.indent + "converter={0!r}".format(node.converter) - if node.path is not None: - s += ",\n" + self.indent + "path={0!r}".format(node.path) - self.level -= 1 - s += "\n)" - return s - - def visit_input(self, node): - s = "{0}(prompt={1!r}".format(node.__class__.__name__, node.prompt) - if node.converter is None and node.path is None: - return s + "\n)" - if node.converter is not None: - s += ",\n" + self.indent + "converter={0!r}".format(node.converter) - s += ",\n" + self.indent + "show_conversion={0!r}".format(node.show_conversion) - s += ",\n" + self.indent + "confirm={0!r}".format(node.confirm) - s += ",\n" + self.indent + "retry={0!r}".format(node.retry) - if node.path is not None: - s += ",\n" + self.indent + "path={0!r}".format(node.path) - s += "\n)" - return s - - def visit_statefile(self, node): - s = "{0}(default_file={1!r}, check={2}, ask_filename={3})" - s = s.format( - node.__class__.__name__, node.default_file, node.check, node.ask_filename - ) - return s - - def visit_while(self, node): - s = "{0}(cond={1!r}".format(node.__class__.__name__, node.cond) - s += ",\n" + self.indent + "body=[" - if len(node.body) > 0: - s += "\n" - self.level += 1 - s += textwrap.indent(",\n".join(map(self.visit, node.body)), self.indent) - self.level -= 1 - s += "\n" + self.indent - s += "]" - s += ",\n" + self.indent + "idxname={0!r}".format(node.idxname) - s += ",\n" + self.indent + "beg={0!r}".format(node.beg) - if node.path is not None: - s += ",\n" + self.indent + "path={0!r}".format(node.path) - s += "\n)" - return s - - -def ensure_str_or_int(x): - """Creates a string or int.""" - if isinstance(x, int): - return x - x = x if isinstance(x, str) else str(x) - try: - x = ast.literal_eval(x) - except (ValueError, SyntaxError): - pass - if not isinstance(x, (int, str)): - msg = "{0!r} could not be converted to int or str".format(x) - raise ValueError(msg) - return x - - -def canon_path(path, indices=None): - """Returns the canonical form of a path, which is a tuple of str or ints. - Indices may be optionally passed in. - """ - if not isinstance(path, str): - return tuple(map(ensure_str_or_int, path)) - if indices is not None: - path = path.format(**indices) - path = path[1:] if path.startswith("/") else path - path = path[:-1] if path.endswith("/") else path - if len(path) == 0: - return () - return tuple(map(ensure_str_or_int, path.split("/"))) - - -class UnstorableType(object): - """Represents an unstorable return value for when no input was given - or such input was skipped. Typically represented by the Unstorable - singleton. - """ - - _inst = None - - def __new__(cls, *args, **kwargs): - if cls._inst is None: - cls._inst = super(UnstorableType, cls).__new__(cls, *args, **kwargs) - return cls._inst - - -Unstorable = UnstorableType() - - -class StateVisitor(Visitor): - """This class visits the nodes and stores the results in a top-level - dict of data according to the state path of the node. The the node - does not have a path or the path does not exist, the storage is skipped. - This class can be optionally initialized with an existing state. 
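# Expected behavior of canon_path() above, per the code (editorial note):
#   canon_path("/foo/0/bar/") -> ("foo", 0, "bar")   # "0" becomes an int key
#   canon_path("")            -> ()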
- """ - - def __init__(self, tree=None, state=None, indices=None): - super().__init__(tree=tree) - self.state = {} if state is None else state - self.indices = {} if indices is None else indices - - def visit(self, node=None): - if node is None: - node = self.tree - if node is None: - raise RuntimeError("no node or tree given!") - rtn = super().visit(node) - path = getattr(node, "path", None) - if callable(path): - path = path(visitor=self, node=node, val=rtn) - if path is not None and rtn is not Unstorable: - self.store(path, rtn, indices=self.indices) - return rtn - - def store(self, path, val, indices=None): - """Stores a value at the path location.""" - path = canon_path(path, indices=indices) - loc = self.state - for p, n in zip(path[:-1], path[1:]): - if isinstance(p, str) and p not in loc: - loc[p] = {} if isinstance(n, str) else [] - elif isinstance(p, int) and abs(p) + (p >= 0) > len(loc): - i = abs(p) + (p >= 0) - len(loc) - if isinstance(n, str): - ex = [{} for _ in range(i)] - else: - ex = [[] for _ in range(i)] - loc.extend(ex) - loc = loc[p] - p = path[-1] - if isinstance(p, int) and abs(p) + (p >= 0) > len(loc): - i = abs(p) + (p >= 0) - len(loc) - ex = [None] * i - loc.extend(ex) - loc[p] = val - - def flatten(self, path="/", value=None, flat=None): - """Returns a dict version of the store whose keys are paths. - Note that list and dict entries will always end in '/', allowing - disambiquation in dump_rules. - """ - value = self.state if value is None else value - flat = {} if flat is None else flat - if isinstance(value, cabc.Mapping): - path = path if path.endswith("/") else path + "/" - flat[path] = value - for k, v in value.items(): - p = path + k - self.flatten(path=p, value=v, flat=flat) - elif isinstance(value, (str, bytes)): - flat[path] = value - elif isinstance(value, cabc.Sequence): - path = path if path.endswith("/") else path + "/" - flat[path] = value - for i, v in enumerate(value): - p = path + str(i) - self.flatten(path=p, value=v, flat=flat) - else: - flat[path] = value - return flat - - -YN = "{GREEN}yes{NO_COLOR} or {RED}no{NO_COLOR} [default: no]? " -YNB = ( - "{GREEN}yes{NO_COLOR}, {RED}no{NO_COLOR}, or " - "{YELLOW}break{NO_COLOR} [default: no]? " -) - - -class PromptVisitor(StateVisitor): - """Visits the nodes in the tree via the a command-line prompt.""" - - def __init__(self, tree=None, state=None, **kwargs): - """ - Parameters - ---------- - tree : Node, optional - Tree of nodes to start visitor with. - state : dict, optional - Initial state to begin with. - kwargs : optional - Options that are passed through to the prompt via the shell's - singleline() method. See BaseShell for mor details. 
- """ - super().__init__(tree=tree, state=state) - self.env = builtins.__xonsh__.env - self.shell = builtins.__xonsh__.shell.shell - self.shell_kwargs = kwargs - - def visit_wizard(self, node): - for child in node.children: - self.visit(child) - - def visit_pass(self, node): - pass - - def visit_message(self, node): - print_color(node.message) - - def visit_question(self, node): - self.env["PROMPT"] = node.question - r = self.shell.singleline(**self.shell_kwargs) - if callable(node.converter): - r = node.converter(r) - self.visit(node.responses[r]) - return r - - def visit_input(self, node): - need_input = True - while need_input: - self.env["PROMPT"] = node.prompt - raw = self.shell.singleline(**self.shell_kwargs) - if callable(node.converter): - try: - x = node.converter(raw) - except KeyboardInterrupt: - raise - except Exception: - if node.retry: - msg = ( - "{{BOLD_RED}}Invalid{{NO_COLOR}} input {0!r}, " - "please retry." - ) - print_color(msg.format(raw)) - continue - else: - raise - if node.show_conversion and x is not Unstorable and str(x) != raw: - msg = "{{BOLD_PURPLE}}Converted{{NO_COLOR}} input {0!r} to {1!r}." - print_color(msg.format(raw, x)) - else: - x = raw - if node.confirm: - msg = "Would you like to keep the input: {0}" - print(msg.format(pprint.pformat(x))) - confirmer = TrueFalseBreak(prompt=YNB) - status = self.visit(confirmer) - if isinstance(status, str) and status == "break": - x = Unstorable - break - else: - need_input = not status - else: - need_input = False - return x - - def visit_while(self, node): - rtns = [] - origidx = self.indices.get(node.idxname, None) - self.indices[node.idxname] = idx = node.beg - while node.cond(visitor=self, node=node): - rtn = list(map(self.visit, node.body)) - rtns.append(rtn) - idx += 1 - self.indices[node.idxname] = idx - if origidx is None: - del self.indices[node.idxname] - else: - self.indices[node.idxname] = origidx - return rtns - - def visit_savejson(self, node): - jstate = json.dumps( - self.state, indent=1, sort_keys=True, default=serialize_xonsh_json - ) - if node.check: - msg = "The current state is:\n\n{0}\n" - print(msg.format(textwrap.indent(jstate, " "))) - ap = "Would you like to save this state, " + YN - asker = TrueFalse(prompt=ap) - do_save = self.visit(asker) - if not do_save: - return Unstorable - fname = None - if node.ask_filename: - fname = self.visit_input(node) - if fname is None or len(fname) == 0: - fname = node.default_file - if os.path.isfile(fname): - backup_file(fname) - else: - os.makedirs(os.path.dirname(fname), exist_ok=True) - with open(fname, "w") as f: - f.write(jstate) - return fname - - def visit_loadjson(self, node): - if node.check: - ap = "Would you like to load an existing file, " + YN - asker = TrueFalse(prompt=ap) - do_load = self.visit(asker) - if not do_load: - return Unstorable - fname = self.visit_input(node) - if fname is None or len(fname) == 0: - fname = node.default_file - if os.path.isfile(fname): - with open(fname, "r") as f: - self.state = json.load(f) - print_color("{{GREEN}}{0!r} loaded.{{NO_COLOR}}".format(fname)) - else: - print_color( - ("{{RED}}{0!r} could not be found, " "continuing.{{NO_COLOR}}").format( - fname - ) - ) - return fname - - def visit_fileinserter(self, node): - # perform the dumping operation. 
- new = node.dumps(self.flatten()) - # check if we should write this out - if node.check: - msg = "The current state to insert is:\n\n{0}\n" - print(msg.format(textwrap.indent(new, " "))) - ap = "Would you like to write out the current state, " + YN - asker = TrueFalse(prompt=ap) - do_save = self.visit(asker) - if not do_save: - return Unstorable - # get and backup the file. - fname = None - if node.ask_filename: - fname = self.visit_input(node) - if fname is None or len(fname) == 0: - fname = node.default_file - if os.path.isfile(fname): - with open(fname, "r") as f: - s = f.read() - before, _, s = s.partition(node.prefix) - _, _, after = s.partition(node.suffix) - backup_file(fname) - else: - before = after = "" - dname = os.path.dirname(fname) - if dname: - os.makedirs(dname, exist_ok=True) - # write out the file - with open(fname, "w") as f: - f.write(before + new + after) - return fname diff --git a/xonsh/xonfig.py b/xonsh/xonfig.py deleted file mode 100644 index da5f004..0000000 --- a/xonsh/xonfig.py +++ /dev/null @@ -1,759 +0,0 @@ -"""The xonsh configuration (xonfig) utility.""" -import os -import re -import ast -import json -import shutil -import random -import pprint -import textwrap -import builtins -import argparse -import functools -import itertools -import contextlib -import collections - -from xonsh.ply import ply - -import xonsh.wizard as wiz -from xonsh import __version__ as XONSH_VERSION -from xonsh.prompt.base import is_template_string -from xonsh.platform import ( - is_readline_available, - ptk_version, - PYTHON_VERSION_INFO, - pygments_version, - ON_POSIX, - ON_LINUX, - linux_distro, - ON_DARWIN, - ON_WINDOWS, - ON_CYGWIN, - DEFAULT_ENCODING, - ON_MSYS, - githash, -) -from xonsh.tools import ( - to_bool, - is_string, - print_exception, - is_superuser, - color_style_names, - print_color, - color_style, -) -from xonsh.foreign_shells import CANON_SHELL_NAMES -from xonsh.xontribs import xontrib_metadata, find_xontrib -from xonsh.lazyasd import lazyobject - -HR = "'`-.,_,.-*'`-.,_,.-*'`-.,_,.-*'`-.,_,.-*'`-.,_,.-*'`-.,_,.-*'`-.,_,.-*'" -WIZARD_HEAD = """ - {{BOLD_WHITE}}Welcome to the xonsh configuration wizard!{{NO_COLOR}} - {{YELLOW}}------------------------------------------{{NO_COLOR}} -This will present a guided tour through setting up the xonsh static -config file. Xonsh will automatically ask you if you want to run this -wizard if the configuration file does not exist. However, you can -always rerun this wizard with the xonfig command: - - $ xonfig wizard - -This wizard will load an existing configuration, if it is available. -Also never fear when this wizard saves its results! It will create -a backup of any existing configuration automatically. - -This wizard has two main phases: foreign shell setup and environment -variable setup. Each phase may be skipped in its entirety. - -For the configuration to take effect, you will need to restart xonsh. - -{hr} -""".format( - hr=HR -) - -WIZARD_FS = """ -{hr} - - {{BOLD_WHITE}}Foreign Shell Setup{{NO_COLOR}} - {{YELLOW}}-------------------{{NO_COLOR}} -The xonsh shell has the ability to interface with foreign shells such -as Bash, or zsh (fish not yet implemented). - -For configuration, this means that xonsh can load the environment, -aliases, and functions specified in the config files of these shells. -Naturally, these shells must be available on the system to work. -Being able to share configuration (and source) from foreign shells -makes it easier to transition to and from xonsh. 
-""".format( - hr=HR -) - -WIZARD_ENV = """ -{hr} - - {{BOLD_WHITE}}Environment Variable Setup{{NO_COLOR}} - {{YELLOW}}--------------------------{{NO_COLOR}} -The xonsh shell also allows you to setup environment variables from -the static configuration file. Any variables set in this way are -superseded by the definitions in the xonshrc or on the command line. -Still, setting environment variables in this way can help define -options that are global to the system or user. - -The following lists the environment variable name, its documentation, -the default value, and the current value. The default and current -values are presented as pretty repr strings of their Python types. - -{{BOLD_GREEN}}Note:{{NO_COLOR}} Simply hitting enter for any environment variable -will accept the default value for that entry. -""".format( - hr=HR -) - -WIZARD_ENV_QUESTION = "Would you like to set env vars now, " + wiz.YN - -WIZARD_XONTRIB = """ -{hr} - - {{BOLD_WHITE}}Xontribs{{NO_COLOR}} - {{YELLOW}}--------{{NO_COLOR}} -No shell is complete without extensions, and xonsh is no exception. Xonsh -extensions are called {{BOLD_GREEN}}xontribs{{NO_COLOR}}, or xonsh contributions. -Xontribs are dynamically loadable, either by importing them directly or by -using the 'xontrib' command. However, you can also configure xonsh to load -xontribs automatically on startup prior to loading the run control files. -This allows the xontrib to be used immediately in your xonshrc files. - -The following describes all xontribs that have been registered with xonsh. -These come from users, 3rd party developers, or xonsh itself! -""".format( - hr=HR -) - -WIZARD_XONTRIB_QUESTION = "Would you like to enable xontribs now, " + wiz.YN - -WIZARD_TAIL = """ -Thanks for using the xonsh configuration wizard!""" - - -_XONFIG_SOURCE_FOREIGN_SHELL_COMMAND = collections.defaultdict( - lambda: "source-foreign", bash="source-bash", cmd="source-cmd", zsh="source-zsh" -) - - -def _dump_xonfig_foreign_shell(path, value): - shell = value["shell"] - shell = CANON_SHELL_NAMES.get(shell, shell) - cmd = [_XONFIG_SOURCE_FOREIGN_SHELL_COMMAND[shell]] - interactive = value.get("interactive", None) - if interactive is not None: - cmd.extend(["--interactive", str(interactive)]) - login = value.get("login", None) - if login is not None: - cmd.extend(["--login", str(login)]) - envcmd = value.get("envcmd", None) - if envcmd is not None: - cmd.extend(["--envcmd", envcmd]) - aliascmd = value.get("aliasmd", None) - if aliascmd is not None: - cmd.extend(["--aliascmd", aliascmd]) - extra_args = value.get("extra_args", None) - if extra_args: - cmd.extend(["--extra-args", repr(" ".join(extra_args))]) - safe = value.get("safe", None) - if safe is not None: - cmd.extend(["--safe", str(safe)]) - prevcmd = value.get("prevcmd", "") - if prevcmd: - cmd.extend(["--prevcmd", repr(prevcmd)]) - postcmd = value.get("postcmd", "") - if postcmd: - cmd.extend(["--postcmd", repr(postcmd)]) - funcscmd = value.get("funcscmd", None) - if funcscmd: - cmd.extend(["--funcscmd", repr(funcscmd)]) - sourcer = value.get("sourcer", None) - if sourcer: - cmd.extend(["--sourcer", sourcer]) - if cmd[0] == "source-foreign": - cmd.append(shell) - cmd.append('"echo loading xonsh foreign shell"') - return " ".join(cmd) - - -def _dump_xonfig_env(path, value): - name = os.path.basename(path.rstrip("/")) - ensurer = builtins.__xonsh__.env.get_ensurer(name) - dval = str(value) if ensurer.detype is None else ensurer.detype(value) - dval = str(value) if dval is None else dval - return "${name} = 
{val!r}".format(name=name, val=dval) - - -def _dump_xonfig_xontribs(path, value): - return "xontrib load {0}".format(" ".join(value)) - - -@lazyobject -def XONFIG_DUMP_RULES(): - return { - "/": None, - "/env/": None, - "/foreign_shells/*/": _dump_xonfig_foreign_shell, - "/env/*": _dump_xonfig_env, - "/env/*/[0-9]*": None, - "/xontribs/": _dump_xonfig_xontribs, - } - - -def make_fs_wiz(): - """Makes the foreign shell part of the wizard.""" - cond = wiz.create_truefalse_cond(prompt="Add a new foreign shell, " + wiz.YN) - fs = wiz.While( - cond=cond, - body=[ - wiz.Input("shell name (e.g. bash): ", path="/foreign_shells/{idx}/shell"), - wiz.StoreNonEmpty( - "interactive shell [bool, default=True]: ", - converter=to_bool, - show_conversion=True, - path="/foreign_shells/{idx}/interactive", - ), - wiz.StoreNonEmpty( - "login shell [bool, default=False]: ", - converter=to_bool, - show_conversion=True, - path="/foreign_shells/{idx}/login", - ), - wiz.StoreNonEmpty( - "env command [str, default='env']: ", - path="/foreign_shells/{idx}/envcmd", - ), - wiz.StoreNonEmpty( - "alias command [str, default='alias']: ", - path="/foreign_shells/{idx}/aliascmd", - ), - wiz.StoreNonEmpty( - ("extra command line arguments [list of str, " "default=[]]: "), - converter=ast.literal_eval, - show_conversion=True, - path="/foreign_shells/{idx}/extra_args", - ), - wiz.StoreNonEmpty( - "safely handle exceptions [bool, default=True]: ", - converter=to_bool, - show_conversion=True, - path="/foreign_shells/{idx}/safe", - ), - wiz.StoreNonEmpty( - "pre-command [str, default='']: ", path="/foreign_shells/{idx}/prevcmd" - ), - wiz.StoreNonEmpty( - "post-command [str, default='']: ", path="/foreign_shells/{idx}/postcmd" - ), - wiz.StoreNonEmpty( - "foreign function command [str, default=None]: ", - path="/foreign_shells/{idx}/funcscmd", - ), - wiz.StoreNonEmpty( - "source command [str, default=None]: ", - path="/foreign_shells/{idx}/sourcer", - ), - wiz.Message(message="Foreign shell added.\n"), - ], - ) - return fs - - -def _wrap_paragraphs(text, width=70, **kwargs): - """Wraps paragraphs instead.""" - pars = text.split("\n") - pars = ["\n".join(textwrap.wrap(p, width=width, **kwargs)) for p in pars] - s = "\n".join(pars) - return s - - -ENVVAR_MESSAGE = """ -{{BOLD_CYAN}}${name}{{NO_COLOR}} -{docstr} -{{RED}}default value:{{NO_COLOR}} {default} -{{RED}}current value:{{NO_COLOR}} {current}""" - -ENVVAR_PROMPT = "{BOLD_GREEN}>>>{NO_COLOR} " - - -def make_exit_message(): - """Creates a message for how to exit the wizard.""" - shell_type = builtins.__xonsh__.shell.shell_type - keyseq = "Ctrl-D" if shell_type == "readline" else "Ctrl-C" - msg = "To exit the wizard at any time, press {BOLD_UNDERLINE_CYAN}" - msg += keyseq + "{NO_COLOR}.\n" - m = wiz.Message(message=msg) - return m - - -def make_envvar(name): - """Makes a StoreNonEmpty node for an environment variable.""" - env = builtins.__xonsh__.env - vd = env.get_docs(name) - if not vd.configurable: - return - default = vd.default - if "\n" in default: - default = "\n" + _wrap_paragraphs(default, width=69) - curr = env.get(name) - if is_string(curr) and is_template_string(curr): - curr = curr.replace("{", "{{").replace("}", "}}") - curr = pprint.pformat(curr, width=69) - if "\n" in curr: - curr = "\n" + curr - msg = ENVVAR_MESSAGE.format( - name=name, - default=default, - current=curr, - docstr=_wrap_paragraphs(vd.docstr, width=69), - ) - mnode = wiz.Message(message=msg) - ens = env.get_ensurer(name) - path = "/env/" + name - pnode = wiz.StoreNonEmpty( - ENVVAR_PROMPT, - 
converter=ens.convert, - show_conversion=True, - path=path, - retry=True, - store_raw=vd.store_as_str, - ) - return mnode, pnode - - -def _make_flat_wiz(kidfunc, *args): - kids = map(kidfunc, *args) - flatkids = [] - for k in kids: - if k is None: - continue - flatkids.extend(k) - wizard = wiz.Wizard(children=flatkids) - return wizard - - -def make_env_wiz(): - """Makes an environment variable wizard.""" - w = _make_flat_wiz(make_envvar, sorted(builtins.__xonsh__.env._docs.keys())) - return w - - -XONTRIB_PROMPT = "{BOLD_GREEN}Add this xontrib{NO_COLOR}, " + wiz.YN - - -def _xontrib_path(visitor=None, node=None, val=None): - # need this to append only based on user-selected size - return ("xontribs", len(visitor.state.get("xontribs", ()))) - - -def make_xontrib(xontrib, package): - """Makes a message and StoreNonEmpty node for a xontrib.""" - name = xontrib.get("name", "") - msg = "\n{BOLD_CYAN}" + name + "{NO_COLOR}\n" - if "url" in xontrib: - msg += "{RED}url:{NO_COLOR} " + xontrib["url"] + "\n" - if "package" in xontrib: - msg += "{RED}package:{NO_COLOR} " + xontrib["package"] + "\n" - if "url" in package: - if "url" in xontrib and package["url"] != xontrib["url"]: - msg += "{RED}package-url:{NO_COLOR} " + package["url"] + "\n" - if "license" in package: - msg += "{RED}license:{NO_COLOR} " + package["license"] + "\n" - msg += "{PURPLE}installed?{NO_COLOR} " - msg += ("no" if find_xontrib(name) is None else "yes") + "\n" - desc = xontrib.get("description", "") - if not isinstance(desc, str): - desc = "".join(desc) - msg += _wrap_paragraphs(desc, width=69) - if msg.endswith("\n"): - msg = msg[:-1] - mnode = wiz.Message(message=msg) - convert = lambda x: name if to_bool(x) else wiz.Unstorable - pnode = wiz.StoreNonEmpty(XONTRIB_PROMPT, converter=convert, path=_xontrib_path) - return mnode, pnode - - -def make_xontribs_wiz(): - """Makes a xontrib wizard.""" - md = xontrib_metadata() - pkgs = [md["packages"].get(d.get("package", None), {}) for d in md["xontribs"]] - w = _make_flat_wiz(make_xontrib, md["xontribs"], pkgs) - return w - - -def make_xonfig_wizard(default_file=None, confirm=False, no_wizard_file=None): - """Makes a configuration wizard for xonsh config file. - - Parameters - ---------- - default_file : str, optional - Default filename to save and load to. User will still be prompted. - confirm : bool, optional - Confirm that the main part of the wizard should be run. - no_wizard_file : str, optional - Filename for that will flag to future runs that the wizard should not be - run again. If None (default), this defaults to default_file. - """ - w = wiz.Wizard( - children=[ - wiz.Message(message=WIZARD_HEAD), - make_exit_message(), - wiz.Message(message=WIZARD_FS), - make_fs_wiz(), - wiz.Message(message=WIZARD_ENV), - wiz.YesNo(question=WIZARD_ENV_QUESTION, yes=make_env_wiz(), no=wiz.Pass()), - wiz.Message(message=WIZARD_XONTRIB), - wiz.YesNo( - question=WIZARD_XONTRIB_QUESTION, yes=make_xontribs_wiz(), no=wiz.Pass() - ), - wiz.Message(message="\n" + HR + "\n"), - wiz.FileInserter( - prefix="# XONSH WIZARD START", - suffix="# XONSH WIZARD END", - dump_rules=XONFIG_DUMP_RULES, - default_file=default_file, - check=True, - ), - wiz.Message(message=WIZARD_TAIL), - ] - ) - if confirm: - q = ( - "Would you like to run the xonsh configuration wizard now?\n\n" - "1. Yes (You can abort at any time)\n" - "2. No, but ask me next time.\n" - "3. No, and don't ask me again.\n\n" - "1, 2, or 3 [default: 2]? 
" - ) - no_wizard_file = default_file if no_wizard_file is None else no_wizard_file - passer = wiz.Pass() - saver = wiz.SaveJSON( - check=False, ask_filename=False, default_file=no_wizard_file - ) - w = wiz.Question( - q, {1: w, 2: passer, 3: saver}, converter=lambda x: int(x) if x != "" else 2 - ) - return w - - -def _wizard(ns): - env = builtins.__xonsh__.env - shell = builtins.__xonsh__.shell.shell - fname = env.get("XONSHRC")[-1] if ns.file is None else ns.file - no_wiz = os.path.join(env.get("XONSH_CONFIG_DIR"), "no-wizard") - w = make_xonfig_wizard( - default_file=fname, confirm=ns.confirm, no_wizard_file=no_wiz - ) - tempenv = {"PROMPT": "", "XONSH_STORE_STDOUT": False} - pv = wiz.PromptVisitor(w, store_in_history=False, multiline=False) - - @contextlib.contextmanager - def force_hide(): - if env.get("XONSH_STORE_STDOUT") and hasattr(shell, "_force_hide"): - orig, shell._force_hide = shell._force_hide, False - yield - shell._force_hide = orig - else: - yield - - with force_hide(), env.swap(tempenv): - try: - pv.visit() - except (KeyboardInterrupt, Exception): - print() - print_exception() - - -def _xonfig_format_human(data): - wcol1 = wcol2 = 0 - for key, val in data: - wcol1 = max(wcol1, len(key)) - wcol2 = max(wcol2, len(str(val))) - hr = "+" + ("-" * (wcol1 + 2)) + "+" + ("-" * (wcol2 + 2)) + "+\n" - row = "| {key!s:<{wcol1}} | {val!s:<{wcol2}} |\n" - s = hr - for key, val in data: - s += row.format(key=key, wcol1=wcol1, val=val, wcol2=wcol2) - s += hr - return s - - -def _xonfig_format_json(data): - data = {k.replace(" ", "_"): v for k, v in data} - s = json.dumps(data, sort_keys=True, indent=1) + "\n" - return s - - -def _info(ns): - env = builtins.__xonsh__.env - data = [("xonsh", XONSH_VERSION)] - hash_, date_ = githash() - if hash_: - data.append(("Git SHA", hash_)) - data.append(("Commit Date", date_)) - data.extend( - [ - ("Python", "{}.{}.{}".format(*PYTHON_VERSION_INFO)), - ("PLY", ply.__version__), - ("have readline", is_readline_available()), - ("prompt toolkit", ptk_version() or None), - ("shell type", env.get("SHELL_TYPE")), - ("pygments", pygments_version()), - ("on posix", bool(ON_POSIX)), - ("on linux", bool(ON_LINUX)), - ] - ) - if ON_LINUX: - data.append(("distro", linux_distro())) - data.extend( - [ - ("on darwin", ON_DARWIN), - ("on windows", ON_WINDOWS), - ("on cygwin", ON_CYGWIN), - ("on msys2", ON_MSYS), - ("is superuser", is_superuser()), - ("default encoding", DEFAULT_ENCODING), - ("xonsh encoding", env.get("XONSH_ENCODING")), - ("encoding errors", env.get("XONSH_ENCODING_ERRORS")), - ] - ) - formatter = _xonfig_format_json if ns.json else _xonfig_format_human - s = formatter(data) - return s - - -def _styles(ns): - env = builtins.__xonsh__.env - curr = env.get("XONSH_COLOR_STYLE") - styles = sorted(color_style_names()) - if ns.json: - s = json.dumps(styles, sort_keys=True, indent=1) - print(s) - return - lines = [] - for style in styles: - if style == curr: - lines.append("* {GREEN}" + style + "{NO_COLOR}") - else: - lines.append(" " + style) - s = "\n".join(lines) - print_color(s) - - -def _str_colors(cmap, cols): - color_names = sorted(cmap.keys(), key=(lambda s: (len(s), s))) - grper = lambda s: min(cols // (len(s) + 1), 8) - lines = [] - for n, group in itertools.groupby(color_names, key=grper): - width = cols // n - line = "" - for i, name in enumerate(group): - buf = " " * (width - len(name)) - line += "{" + name + "}" + name + "{NO_COLOR}" + buf - if (i + 1) % n == 0: - lines.append(line) - line = "" - if len(line) != 0: - lines.append(line) - 
return "\n".join(lines) - - -def _tok_colors(cmap, cols): - from xonsh.style_tools import Color - - nc = Color.NO_COLOR - names_toks = {} - for t in cmap.keys(): - name = str(t) - if name.startswith("Token.Color."): - _, _, name = name.rpartition(".") - names_toks[name] = t - color_names = sorted(names_toks.keys(), key=(lambda s: (len(s), s))) - grper = lambda s: min(cols // (len(s) + 1), 8) - toks = [] - for n, group in itertools.groupby(color_names, key=grper): - width = cols // n - for i, name in enumerate(group): - toks.append((names_toks[name], name)) - buf = " " * (width - len(name)) - if (i + 1) % n == 0: - buf += "\n" - toks.append((nc, buf)) - if not toks[-1][1].endswith("\n"): - toks[-1] = (nc, toks[-1][1] + "\n") - return toks - - -def _colors(args): - columns, _ = shutil.get_terminal_size() - columns -= int(ON_WINDOWS) - style_stash = builtins.__xonsh__.env["XONSH_COLOR_STYLE"] - - if args.style is not None: - if args.style not in color_style_names(): - print("Invalid style: {}".format(args.style)) - return - builtins.__xonsh__.env["XONSH_COLOR_STYLE"] = args.style - - color_map = color_style() - akey = next(iter(color_map)) - if isinstance(akey, str): - s = _str_colors(color_map, columns) - else: - s = _tok_colors(color_map, columns) - print_color(s) - builtins.__xonsh__.env["XONSH_COLOR_STYLE"] = style_stash - - -def _tutorial(args): - import webbrowser - - webbrowser.open("http://xon.sh/tutorial.html") - - -@functools.lru_cache(1) -def _xonfig_create_parser(): - p = argparse.ArgumentParser( - prog="xonfig", description="Manages xonsh configuration." - ) - subp = p.add_subparsers(title="action", dest="action") - info = subp.add_parser( - "info", help=("displays configuration information, " "default action") - ) - info.add_argument( - "--json", action="store_true", default=False, help="reports results as json" - ) - wiz = subp.add_parser("wizard", help="displays configuration information") - wiz.add_argument( - "--file", default=None, help="config file location, default=$XONSHRC" - ) - wiz.add_argument( - "--confirm", - action="store_true", - default=False, - help="confirm that the wizard should be run.", - ) - sty = subp.add_parser("styles", help="prints available xonsh color styles") - sty.add_argument( - "--json", action="store_true", default=False, help="reports results as json" - ) - colors = subp.add_parser("colors", help="preview color style") - colors.add_argument( - "style", nargs="?", default=None, help="style to preview, default: " - ) - subp.add_parser("tutorial", help="Launch tutorial in browser.") - return p - - -_XONFIG_MAIN_ACTIONS = { - "info": _info, - "wizard": _wizard, - "styles": _styles, - "colors": _colors, - "tutorial": _tutorial, -} - - -def xonfig_main(args=None): - """Main xonfig entry point.""" - if not args or ( - args[0] not in _XONFIG_MAIN_ACTIONS and args[0] not in {"-h", "--help"} - ): - args.insert(0, "info") - parser = _xonfig_create_parser() - ns = parser.parse_args(args) - if ns.action is None: # apply default action - ns = parser.parse_args(["info"] + args) - return _XONFIG_MAIN_ACTIONS[ns.action](ns) - - -@lazyobject -def STRIP_COLOR_RE(): - return re.compile("{.*?}") - - -def _align_string(string, align="<", fill=" ", width=80): - """ Align and pad a color formatted string """ - linelen = len(STRIP_COLOR_RE.sub("", string)) - padlen = max(width - linelen, 0) - if align == "^": - return fill * (padlen // 2) + string + fill * (padlen // 2 + padlen % 2) - elif align == ">": - return fill * padlen + string - elif align == "<": - return 
string + fill * padlen - else: - return string - - -@lazyobject -def TAGLINES(): - return [ - "Exofrills in the shell", - "No frills in the shell", - "Become the Lord of the Files", - "Break out of your shell", - "The only shell that is also a shell", - "All that is and all that shell be", - "It cannot be that hard", - "Pass the xonsh, Piggy", - "Piggy glanced nervously into hell and cradled the xonsh", - "The xonsh is a symbol", - "It is pronounced conch", - "The shell, bourne again", - "Snailed it", - "Starfish loves you", - "Come snail away", - "This is Major Tom to Ground Xonshtrol", - "Sally sells csh and keeps xonsh to herself", - "Nice indeed. Everything's accounted for, except your old shell.", - "I wanna thank you for putting me back in my snail shell", - "Crustaceanly Yours", - "With great shell comes great reproducibility", - "None shell pass", - "You shell not pass!", - "The x-on shell", - "Ever wonder why there isn't a Taco Shell? Because it is a corny idea.", - "The carcolh will catch you!", - "People xonshtantly mispronounce these things", - "WHAT...is your favorite shell?", - "Conches for the xonsh god!", - "Python-powered, cross-platform, Unix-gazing shell", - "Tab completion in Alderaan places", - "This fix was trickier than expected", - "The unholy cross of Bash/Python", - ] - - -# list of strings or tuples (string, align, fill) -WELCOME_MSG = [ - "", - ("{{INTENSE_WHITE}}Welcome to the xonsh shell ({version}){{NO_COLOR}}", "^", " "), - "", - ("{{INTENSE_RED}}~{{NO_COLOR}} {tagline} {{INTENSE_RED}}~{{NO_COLOR}}", "^", " "), - "", - ("{{INTENSE_BLACK}}", "<", "-"), - "{{GREEN}}xonfig{{NO_COLOR}} tutorial {{INTENSE_WHITE}}-> Launch the tutorial in " - "the browser{{NO_COLOR}}", - "{{GREEN}}xonfig{{NO_COLOR}} wizard {{INTENSE_WHITE}}-> Run the configuration " - "wizard and claim your shell {{NO_COLOR}}", - "{{INTENSE_BLACK}}(Note: Run the Wizard or create a {{RED}}~/.xonshrc{{INTENSE_BLACK}} file " - "to suppress the welcome screen)", - "", -] - - -def print_welcome_screen(): - subst = dict(tagline=random.choice(list(TAGLINES)), version=XONSH_VERSION) - for elem in WELCOME_MSG: - if isinstance(elem, str): - elem = (elem, "", "") - line = elem[0].format(**subst) - termwidth = os.get_terminal_size().columns - line = _align_string(line, elem[1], elem[2], width=termwidth) - print_color(line) diff --git a/xonsh/xonshrc b/xonsh/xonshrc deleted file mode 100644 index 01d7727..0000000 --- a/xonsh/xonshrc +++ /dev/null @@ -1,9 +0,0 @@ -# adjust some paths -#$BASH_COMPLETIONS.append('/usr/local/etc/bash_completion.d/git-completion.bash') -#$PATH.append('/some/path/bin') -#$LD_LIBRARY_PATH = ['/some/path1/lib', '/some/path2/lib', ''] - -aliases['hi'] = echo hello world - -# some customization options -#$MULTILINE_PROMPT = '`·.,¸,.·*¯`·.,¸,.·*¯' \ No newline at end of file diff --git a/xonsh/xontribs.json b/xonsh/xontribs.json deleted file mode 100644 index b779e3d..0000000 --- a/xonsh/xontribs.json +++ /dev/null @@ -1,312 +0,0 @@ -{"xontribs": [ - {"name": "apt_tabcomplete", - "package": "xonsh-apt-tabcomplete", - "url": "https://github.com/DangerOnTheRanger/xonsh-apt-tabcomplete", - "description": ["Adds tabcomplete functionality to apt-get/apt-cache inside of xonsh."] - }, - {"name": "autojump", - "package": "xontrib-autojump", - "url": "https://github.com/gsaga/autojump-xonsh", - "description": ["autojump support for xonsh"] - }, - {"name": "autoxsh", - "package": "xonsh-autoxsh", - "url": "https://github.com/Granitas/xonsh-autoxsh", - "description": ["Adds automatic execution of 
xonsh script files called", - "``.autoxsh`` when enterting a directory with ``cd`` function"] - }, - {"name": "bashisms", - "package": "xonsh", - "url": "http://xon.sh", - "description": [ - "Enables additional Bash-like syntax while at the command prompt. For ", - "example, the ``!!`` syntax for running the previous command is now usable.", - "Note that these features are implemented as precommand events and these ", - "additions do not affect the xonsh language when run as script. That said, ", - "you might find them useful if you have strong muscle memory.\n\n", - "**Warning:** This xontrib may modify user command line input to implement ", - "its behavior. To see the modifications as they are applied (in unified diff", - "format), please set ``$XONSH_DEBUG`` to ``2`` or higher."] - }, - {"name": "base16_shell", - "package": "xontrib-base16-shell", - "url": "https://github.com/ErickTucto/xontrib-base16-shell", - "description": ["Change base16 shell themes"] - }, - {"name": "coreutils", - "package": "xonsh", - "url": "http://xon.sh", - "description": [ - "Additional core utilities that are implemented in xonsh. The current list ", - "includes:\n", - "\n", - "* cat\n", - "* echo\n", - "* pwd\n", - "* tee\n", - "* tty", - "* yes\n", - "\n", - "In many cases, these may have a lower performance overhead than the ", - "posix command line utility with the same name. This is because these ", - "tools avoid the need for a full subprocess call. Additionally, these ", - "tools are cross-platform."] - }, - {"name": "direnv", - "package": "xonsh-direnv", - "url": "https://github.com/74th/xonsh-direnv", - "description": ["Supports direnv."] - }, - {"name": "distributed", - "package": "xonsh", - "url": "http://xon.sh", - "description": [ - "The distributed parallel computing library hooks for xonsh. ", - "Importantly this provides a substitute 'dworker' command which enables ", - "distributed workers to have access to xonsh builtins.\n\n", - "Furthermore, this xontrib adds a 'DSubmitter' context manager for ", - "executing a block remotely. Moreover, this also adds a convenience ", - "function 'dsubmit()' for creating DSubmitter and Executor instances ", - "at the same time. Thus users may submit distributed jobs with::\n\n", - " with dsubmit('127.0.0.1:8786', rtn='x') as dsub:\n", - " x = $(echo I am elsewhere)\n\n", - " res = dsub.future.result()\n", - " print(res)\n\n", - "This is useful for long running or non-blocking jobs."] - }, - {"name": "docker_tabcomplete", - "package": "xonsh-docker-tabcomplete", - "url": "https://github.com/xsteadfastx/xonsh-docker-tabcomplete", - "description": ["Adds tabcomplete functionality to docker inside of xonsh."] - }, - {"name": "jedi", - "package": "xonsh", - "url": "http://xon.sh", - "description": ["Jedi tab completion hooks for xonsh."] - }, - {"name": "mpl", - "package": "xonsh", - "url": "http://xon.sh", - "description": ["Matplotlib hooks for xonsh, including the new 'mpl' alias ", - "that displays the current figure on the screen."] - }, - {"name": "prompt_ret_code", - "package": "xonsh", - "url": "http://xon.sh", - "description": ["Adds return code info to the prompt"] - }, - {"name": "free_cwd", - "package": "xonsh", - "url": "http://xon.sh", - "description": [ - "Windows only xontrib, to release the lock on the current directory ", - "whenever the prompt is shown. Enabling this will allow the other ", - "programs or Windows Explorer to delete or rename the current or parent ", - "directories. 
Internally, it is accomplished by temporarily resetting ", - "CWD to the root drive folder while waiting at the prompt. This only ", - "works with the prompt_toolkit backend and can cause cause issues ", - "if any extensions are enabled that hook the prompt and relies on ", - "``os.getcwd()``"] - }, - {"name": "whole_word_jumping", - "package": "xonsh", - "url": "http://xon.sh", - "description": [ - "Jumping across whole words (non-whitespace) with Ctrl+Left/Right.", - "Alt+Left/Right remains unmodified to jump over smaller word segments."] - }, - {"name": "scrapy_tabcomplete", - "package": "xonsh-scrapy-tabcomplete", - "url": "https://github.com/Granitas/xonsh-scrapy-tabcomplete", - "description": ["Adds tabcomplete functionality to scrapy inside of xonsh."] - }, - {"name": "vox", - "package": "xonsh", - "url": "http://xon.sh", - "description": ["Python virtual environment manager for xonsh."] - }, - {"name": "vox_tabcomplete", - "package": "xonsh-vox-tabcomplete", - "url": "https://github.com/Granitosaurus/xonsh-vox-tabcomplete", - "description": ["Adds tabcomplete functionality to vox inside of xonsh."] - }, - {"name": "xo", - "package": "exofrills", - "url": "https://github.com/scopatz/xo", - "description": ["Adds an 'xo' alias to run the exofrills text editor in the ", - "current Python interpreter session. This shaves off a ", - "bit of the startup time when running your favorite, minimal ", - "text editor."] - }, - {"name": "xonda", - "package": "xonda", - "url": "https://github.com/gforsyth/xonda", - "description": ["A thin wrapper around conda with tab completion"] - }, - {"name": "avox", - "package": "xontrib-avox", - "url": "https://github.com/astronouth7303/xontrib-avox", - "description": ["Automatic (de)activation of virtual environments as you cd around"] - }, - {"name": "z", - "package": "xontrib-z", - "url": "https://github.com/astronouth7303/xontrib-z", - "description": ["Tracks your most used directories, based on 'frecency'."] - }, - {"name": "powerline", - "package": "xontrib-powerline", - "url": "https://github.com/santagada/xontrib-powerline", - "description": ["Powerline for Xonsh shell"] - }, - {"name": "prompt_vi_mode", - "package": "xontrib-prompt-vi-mode", - "url": "https://github.com/t184256/xontrib-prompt-vi-mode", - "description": ["vi-mode status formatter for xonsh prompt"] - }, - {"name": "click_tabcomplete", - "package": "xonsh-click-tabcomplete", - "url": "https://github.com/Granitosaurus/xonsh-click-tabcomplete", - "description": ["Adds tabcomplete functionality to click based python applications inside of xonsh."] - }, - {"name": "fzf-widgets", - "package": "xontrib-fzf-widgets", - "url": "https://github.com/shahinism/xontrib-fzf-widgets", - "description": ["Adds some fzf widgets to your xonsh shell."] - }, - {"name": "schedule", - "package": "xontrib-schedule", - "url": "https://github.com/astronouth7303/xontrib-schedule", - "description": ["Xonsh Task Scheduling"] - } - ], - "packages": { - "exofrills": { - "license": "WTFPL", - "url": "http://exofrills.org", - "install": { - "conda": "conda install -c conda-forge xo", - "pip": "xpip install exofrills"} - }, - "xonsh": { - "license": "BSD 3-clause", - "url": "http://xon.sh", - "install": { - "conda": "conda install -c conda-forge xonsh", - "pip": "xpip install xonsh", - "aura": "sudo aura -A xonsh", - "yaourt": "yaourt -Sa xonsh"} - }, - "xontrib-prompt-ret-code": { - "license": "MIT", - "url": "https://github.com/Siecje/xontrib-prompt-ret-code", - "install": { - "pip": "xpip install 
xontrib-prompt-ret-code" - } - }, - "xonsh-apt-tabcomplete": { - "license": "BSD 2-clause", - "url": "https://github.com/DangerOnTheRanger/xonsh-apt-tabcomplete", - "install": { - "pip": "xpip install xonsh-apt-tabcomplete" - } - }, - "xonsh-direnv": { - "license": "MIT", - "url": "https://github.com/74th/xonsh-direnv", - "install": { - "pip": "xpip install xonsh-direnv" - } - }, - "xonsh-docker-tabcomplete": { - "license": "MIT", - "url": "https://github.com/xsteadfastx/xonsh-docker-tabcomplete", - "install": { - "pip": "xpip install xonsh-docker-tabcomplete" - } - }, - "xonsh-scrapy-tabcomplete": { - "license": "GPLv3", - "url": "https://github.com/Granitas/xonsh-scrapy-tabcomplete", - "install": { - "pip": "xpip install xonsh-scrapy-tabcomplete" - } - }, - "xonsh-vox-tabcomplete": { - "license": "GPLv3", - "url": "https://github.com/Granitosaurus/xonsh-vox-tabcomplete", - "install": { - "pip": "xpip install xonsh-vox-tabcomplete" - } - }, - "xonsh-click-tabcomplete": { - "license": "GPLv3", - "url": "https://github.com/Granitosaurus/xonsh-click-tabcomplete", - "install": { - "pip": "xpip install xonsh-click-tabcomplete" - } - }, - "xonsh-autoxsh": { - "license": "GPLv3", - "url": "https://github.com/Granitas/xonsh-autoxsh", - "install": { - "pip": "xpip install xonsh-autoxsh" - } - }, - "xonda": { - "license": "MIT", - "url": "https://github.com/gforsyth/xonda", - "install": { - "pip": "xpip install xonda" - } - }, - "xontrib-avox": { - "license": "GPLv3", - "url": "https://github.com/astronouth7303/xontrib-avox", - "install": { - "pip": "xpip install xontrib-avox" - } - }, - "xontrib-z": { - "license": "GPLv3", - "url": "https://github.com/astronouth7303/xontrib-z", - "install": { - "pip": "xpip install xontrib-z" - } - }, - "xontrib-powerline": { - "license": "MIT", - "url": "https://github.com/santagada/xontrib-powerline", - "install": { - "pip": "xpip install xontrib-powerline" - } - }, - "xontrib-thefuck": { - "license": "MIT", - "url": "https://github.com/meatballs/xontrib-thefuck", - "install": { - "pip": "xpip install xontrib-thefuck" - } - }, - "xontrib-prompt-vi-mode": { - "license": "MIT", - "url": "https://github.com/t184256/xontrib-prompt-vi-mode", - "install": { - "pip": "xpip install xontrib-prompt-vi-mode" - } - }, - "xontrib-fzf-widgets": { - "license": "GPLv3", - "url": "https://github.com/shahinism/xontrib-fzf-widgets", - "install": { - "pip": "xpip install xontrib-fzf-widgets" - } - }, - "xontrib-schedule": { - "license": "MIT", - "url": "https://github.com/astronouth7303/xontrib-schedule", - "install": { - "pip": "xpip install xontrib-schedule" - } - } - } -} diff --git a/xonsh/xontribs.py b/xonsh/xontribs.py deleted file mode 100644 index c392f67..0000000 --- a/xonsh/xontribs.py +++ /dev/null @@ -1,178 +0,0 @@ -"""Tools for helping manage xontributions.""" -import os -import sys -import json -import builtins -import argparse -import functools -import importlib -import importlib.util - -from xonsh.tools import print_color, unthreadable - - -@functools.lru_cache(1) -def xontribs_json(): - return os.path.join(os.path.dirname(__file__), "xontribs.json") - - -def find_xontrib(name): - """Finds a xontribution from its name.""" - if name.startswith("."): - spec = importlib.util.find_spec(name, package="xontrib") - else: - spec = importlib.util.find_spec("." 
+ name, package="xontrib") - return spec or importlib.util.find_spec(name) - - -def xontrib_context(name): - """Return a context dictionary for a xontrib of a given name.""" - spec = find_xontrib(name) - if spec is None: - return None - m = importlib.import_module(spec.name) - pubnames = getattr(m, "__all__", None) - if pubnames is not None: - ctx = {k: getattr(m, k) for k in pubnames} - else: - ctx = {k: getattr(m, k) for k in dir(m) if not k.startswith("_")} - return ctx - - -def prompt_xontrib_install(names): - """Returns a formatted string with name of xontrib package to prompt user""" - md = xontrib_metadata() - packages = [] - for name in names: - for xontrib in md["xontribs"]: - if xontrib["name"] == name: - packages.append(xontrib["package"]) - - print( - "The following xontribs are enabled but not installed: \n" - " {xontribs}\n" - "To install them run \n" - " xpip install {packages}".format( - xontribs=" ".join(names), packages=" ".join(packages) - ) - ) - - -def update_context(name, ctx=None): - """Updates a context in place from a xontrib. If ctx is not provided, - then __xonsh__.ctx is updated. - """ - if ctx is None: - ctx = builtins.__xonsh__.ctx - if not hasattr(update_context, "bad_imports"): - update_context.bad_imports = [] - modctx = xontrib_context(name) - if modctx is None: - update_context.bad_imports.append(name) - return ctx - return ctx.update(modctx) - - -@functools.lru_cache() -def xontrib_metadata(): - """Loads and returns the xontribs.json file.""" - with open(xontribs_json(), "r") as f: - md = json.load(f) - return md - - -def xontribs_load(names, verbose=False): - """Load xontribs from a list of names""" - ctx = builtins.__xonsh__.ctx - for name in names: - if verbose: - print("loading xontrib {0!r}".format(name)) - update_context(name, ctx=ctx) - if update_context.bad_imports: - prompt_xontrib_install(update_context.bad_imports) - del update_context.bad_imports - - -def _load(ns): - """load xontribs""" - xontribs_load(ns.names, verbose=ns.verbose) - - -def _list(ns): - """Lists xontribs.""" - meta = xontrib_metadata() - data = [] - nname = 6 # ensures some buffer space. 
- names = None if len(ns.names) == 0 else set(ns.names) - for md in meta["xontribs"]: - name = md["name"] - if names is not None and md["name"] not in names: - continue - nname = max(nname, len(name)) - spec = find_xontrib(name) - if spec is None: - installed = loaded = False - else: - installed = True - loaded = spec.name in sys.modules - d = {"name": name, "installed": installed, "loaded": loaded} - data.append(d) - if ns.json: - jdata = {d.pop("name"): d for d in data} - s = json.dumps(jdata) - print(s) - else: - s = "" - for d in data: - name = d["name"] - lname = len(name) - s += "{PURPLE}" + name + "{NO_COLOR} " + " " * (nname - lname) - if d["installed"]: - s += "{GREEN}installed{NO_COLOR} " - else: - s += "{RED}not-installed{NO_COLOR} " - if d["loaded"]: - s += "{GREEN}loaded{NO_COLOR}" - else: - s += "{RED}not-loaded{NO_COLOR}" - s += "\n" - print_color(s[:-1]) - - -@functools.lru_cache() -def _create_xontrib_parser(): - # parse command line args - parser = argparse.ArgumentParser( - prog="xontrib", description="Manages xonsh extensions" - ) - subp = parser.add_subparsers(title="action", dest="action") - load = subp.add_parser("load", help="loads xontribs") - load.add_argument( - "-v", "--verbose", action="store_true", default=False, dest="verbose" - ) - load.add_argument("names", nargs="+", default=(), help="names of xontribs") - lyst = subp.add_parser( - "list", help=("list xontribs, whether they are " "installed, and loaded.") - ) - lyst.add_argument( - "--json", action="store_true", default=False, help="reports results as json" - ) - lyst.add_argument("names", nargs="*", default=(), help="names of xontribs") - return parser - - -_MAIN_XONTRIB_ACTIONS = {"load": _load, "list": _list} - - -@unthreadable -def xontribs_main(args=None, stdin=None): - """Alias that loads xontribs""" - if not args or ( - args[0] not in _MAIN_XONTRIB_ACTIONS and args[0] not in {"-h", "--help"} - ): - args.insert(0, "load") - parser = _create_xontrib_parser() - ns = parser.parse_args(args) - if ns.action is None: # apply default action - ns = parser.parse_args(["load"] + args) - return _MAIN_XONTRIB_ACTIONS[ns.action](ns) diff --git a/xonsh/xoreutils/__init__.py b/xonsh/xoreutils/__init__.py deleted file mode 100644 index ffcf112..0000000 --- a/xonsh/xoreutils/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# amalgamate -# amalgamate end diff --git a/xonsh/xoreutils/_which.py b/xonsh/xoreutils/_which.py deleted file mode 100644 index 159135f..0000000 --- a/xonsh/xoreutils/_which.py +++ /dev/null @@ -1,367 +0,0 @@ -#!/usr/bin/env python -# Copyright (c) 2002-2007 ActiveState Software Inc. - -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be included -# in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS -# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
-# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -# -# Author: -# Trent Mick (TrentM@ActiveState.com) -# Home: -# http://trentm.com/projects/which/ -import os -import sys -import stat -import getopt -import builtins -import collections.abc as cabc - -r"""Find the full path to commands. - -which(command, path=None, verbose=0, exts=None) - Return the full path to the first match of the given command on the - path. - -whichall(command, path=None, verbose=0, exts=None) - Return a list of full paths to all matches of the given command on - the path. - -whichgen(command, path=None, verbose=0, exts=None) - Return a generator which will yield full paths to all matches of the - given command on the path. - -By default the PATH environment variable is searched (as well as, on -Windows, the AppPaths key in the registry), but a specific 'path' list -to search may be specified as well. On Windows, the PATHEXT environment -variable is applied as appropriate. - -If "verbose" is true then a tuple of the form - (, ) -is returned for each match. The latter element is a textual description -of where the match was found. For example: - from PATH element 0 - from HKLM\SOFTWARE\...\perl.exe -""" - -_cmdlnUsage = """ - Show the full path of commands. - - Usage: - which [...] [...] - - Options: - -h, --help Print this help and exit. - -V, --version Print the version info and exit. - - -a, --all Print *all* matching paths. - -v, --verbose Print out how matches were located and - show near misses on stderr. - -q, --quiet Just print out matches. I.e., do not print out - near misses. - - -p , --path= - An alternative path (list of directories) may - be specified for searching. - -e , --exts= - Specify a list of extensions to consider instead - of the usual list (';'-separate list, Windows - only). - - Show the full path to the program that would be run for each given - command name, if any. Which, like GNU's which, returns the number of - failed arguments, or -1 when no was given. - - Near misses include duplicates, non-regular files and (on Un*x) - files without executable access. -""" - -__version_info__ = (1, 2, 0) -__version__ = ".".join(map(str, __version_info__)) -__all__ = ["which", "whichall", "whichgen", "WhichError"] - - -class WhichError(Exception): - pass - - -# internal support stuff - - -def _getRegisteredExecutable(exeName): - """Windows allow application paths to be registered in the registry.""" - registered = None - if sys.platform.startswith("win"): - if os.path.splitext(exeName)[1].lower() != ".exe": - exeName += ".exe" - try: - import winreg as _winreg - except ImportError: - import _winreg - try: - key = "SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\" + exeName - value = _winreg.QueryValue(_winreg.HKEY_LOCAL_MACHINE, key) - registered = (value, "from HKLM\\" + key) - except _winreg.error: - pass - if registered and not os.path.exists(registered[0]): - registered = None - return registered - - -def _samefile(fname1, fname2): - if sys.platform.startswith("win"): - return os.path.normpath(os.path.normcase(fname1)) == os.path.normpath( - os.path.normcase(fname2) - ) - else: - return os.path.samefile(fname1, fname2) - - -def _cull(potential, matches, verbose=0): - """Cull inappropriate matches. 
Possible reasons: - - a duplicate of a previous match - - not a disk file - - not executable (non-Windows) - If 'potential' is approved it is returned and added to 'matches'. - Otherwise, None is returned. - """ - for match in matches: # don't yield duplicates - if _samefile(potential[0], match[0]): - if verbose: - sys.stderr.write("duplicate: %s (%s)\n" % potential) - return None - else: - if not stat.S_ISREG(os.stat(potential[0]).st_mode): - if verbose: - sys.stderr.write("not a regular file: %s (%s)\n" % potential) - elif sys.platform != "win32" and not os.access(potential[0], os.X_OK): - if verbose: - sys.stderr.write("no executable access: %s (%s)\n" % potential) - else: - matches.append(potential) - return potential - - -# module API - - -def whichgen(command, path=None, verbose=0, exts=None): - """Return a generator of full paths to the given command. - - "command" is a the name of the executable to search for. - "path" is an optional alternate path list to search. The default it - to use the PATH environment variable. - "verbose", if true, will cause a 2-tuple to be returned for each - match. The second element is a textual description of where the - match was found. - "exts" optionally allows one to specify a list of extensions to use - instead of the standard list for this system. This can - effectively be used as an optimization to, for example, avoid - stat's of "foo.vbs" when searching for "foo" and you know it is - not a VisualBasic script but ".vbs" is on PATHEXT. This option - is only supported on Windows. - - This method returns a generator which yields tuples of the form (, ). - """ - matches = [] - if path is None: - usingGivenPath = 0 - path = os.environ.get("PATH", "").split(os.pathsep) - if sys.platform.startswith("win"): - path.insert(0, os.curdir) # implied by Windows shell - else: - usingGivenPath = 1 - - # Windows has the concept of a list of extensions (PATHEXT env var). - if sys.platform.startswith("win"): - if exts is None: - exts = builtins.__xonsh__.env["PATHEXT"] - # If '.exe' is not in exts then obviously this is Win9x and - # or a bogus PATHEXT, then use a reasonable default. - for ext in exts: - if ext.lower() == ".exe": - break - else: - exts = [".COM", ".EXE", ".BAT", ".CMD"] - elif not isinstance(exts, cabc.Sequence): - raise TypeError("'exts' argument must be a sequence or None") - else: - if exts is not None: - raise WhichError( - "'exts' argument is not supported on " "platform '%s'" % sys.platform - ) - exts = [] - - # File name cannot have path separators because PATH lookup does not - # work that way. 
- if os.sep in command or os.altsep and os.altsep in command: - if os.path.exists(command): - match = _cull((command, "explicit path given"), matches, verbose) - yield match - else: - for i in range(len(path)): - dirName = path[i] - # On windows the dirName *could* be quoted, drop the quotes - if ( - sys.platform.startswith("win") - and len(dirName) >= 2 - and dirName[0] == '"' - and dirName[-1] == '"' - ): - dirName = dirName[1:-1] - for ext in [""] + exts: - absName = os.path.abspath( - os.path.normpath(os.path.join(dirName, command + ext)) - ) - if os.path.isfile(absName): - if usingGivenPath: - fromWhere = "from given path element %d" % i - elif not sys.platform.startswith("win"): - fromWhere = "from PATH element %d" % i - elif i == 0: - fromWhere = "from current directory" - else: - fromWhere = "from PATH element %d" % (i - 1) - match = _cull((absName, fromWhere), matches, verbose) - if match: - yield match - match = _getRegisteredExecutable(command) - if match is not None: - match = _cull(match, matches, verbose) - if match: - yield match - - -def which(command, path=None, verbose=0, exts=None): - """Return the full path to the first match of the given command on - the path. - - "command" is a the name of the executable to search for. - "path" is an optional alternate path list to search. The default it - to use the PATH environment variable. - "verbose", if true, will cause a 2-tuple to be returned. The second - element is a textual description of where the match was found. - "exts" optionally allows one to specify a list of extensions to use - instead of the standard list for this system. This can - effectively be used as an optimization to, for example, avoid - stat's of "foo.vbs" when searching for "foo" and you know it is - not a VisualBasic script but ".vbs" is on PATHEXT. This option - is only supported on Windows. - - If no match is found for the command, a WhichError is raised. - """ - try: - absName, fromWhere = next(whichgen(command, path, verbose, exts)) - except StopIteration: - raise WhichError("Could not find '%s' on the path." % command) - if verbose: - return absName, fromWhere - else: - return absName - - -def whichall(command, path=None, verbose=0, exts=None): - """Return a list of full paths to all matches of the given command - on the path. - - "command" is a the name of the executable to search for. - "path" is an optional alternate path list to search. The default it - to use the PATH environment variable. - "verbose", if true, will cause a 2-tuple to be returned for each - match. The second element is a textual description of where the - match was found. - "exts" optionally allows one to specify a list of extensions to use - instead of the standard list for this system. This can - effectively be used as an optimization to, for example, avoid - stat's of "foo.vbs" when searching for "foo" and you know it is - not a VisualBasic script but ".vbs" is on PATHEXT. This option - is only supported on Windows. - """ - if verbose: - return list(whichgen(command, path, verbose, exts)) - else: - return list(absName for absName, _ in whichgen(command, path, verbose, exts)) - - -# mainline - - -def main(argv): - all = 0 - verbose = 0 - altpath = None - exts = None - try: - optlist, args = getopt.getopt( - argv[1:], - "haVvqp:e:", - ["help", "all", "version", "verbose", "quiet", "path=", "exts="], - ) - except getopt.GetoptErrsor as msg: - sys.stderr.write("which: error: %s. 
Your invocation was: %s\n" % (msg, argv)) - sys.stderr.write("Try 'which --help'.\n") - return 1 - for opt, optarg in optlist: - if opt in ("-h", "--help"): - print(_cmdlnUsage) - return 0 - elif opt in ("-V", "--version"): - print("which %s" % __version__) - return 0 - elif opt in ("-a", "--all"): - all = 1 - elif opt in ("-v", "--verbose"): - verbose = 1 - elif opt in ("-q", "--quiet"): - verbose = 0 - elif opt in ("-p", "--path"): - if optarg: - altpath = optarg.split(os.pathsep) - else: - altpath = [] - elif opt in ("-e", "--exts"): - if optarg: - exts = optarg.split(os.pathsep) - else: - exts = [] - - if len(args) == 0: - return -1 - - failures = 0 - for arg in args: - # print "debug: search for %r" % arg - nmatches = 0 - for absName, fromWhere in whichgen( - arg, path=altpath, verbose=verbose, exts=exts - ): - if verbose: - print("%s (%s)" % (absName, fromWhere)) - else: - print(absName) - nmatches += 1 - if not all: - break - if not nmatches: - failures += 1 - return failures - - -if __name__ == "__main__": - sys.exit(main(sys.argv)) diff --git a/xonsh/xoreutils/cat.py b/xonsh/xoreutils/cat.py deleted file mode 100644 index decbcf4..0000000 --- a/xonsh/xoreutils/cat.py +++ /dev/null @@ -1,164 +0,0 @@ -"""Implements a cat command for xonsh.""" -import os -import time -import builtins - -import xonsh.proc as xproc -from xonsh.xoreutils.util import arg_handler - - -def _cat_line( - f, sep, last_was_blank, line_count, opts, out, enc, enc_errors, read_size -): - _r = r = f.readline(size=80) - if isinstance(_r, str): - _r = r = _r.encode(enc, enc_errors) - if r == b"": - last_was_blank, line_count, read_size, True - if r.endswith(sep): - _r = _r[: -len(sep)] - this_one_blank = _r == b"" - if last_was_blank and this_one_blank and opts["squeeze_blank"]: - last_was_blank, line_count, read_size, False - last_was_blank = this_one_blank - if opts["number_all"] or (opts["number_nonblank"] and not this_one_blank): - start = ("%6d " % line_count).encode(enc, enc_errors) - _r = start + _r - line_count += 1 - if opts["show_ends"]: - _r = _r + b"$" - out.buffer.write(_r) - out.flush() - read_size += len(r) - return last_was_blank, line_count, read_size, False - - -def _cat_single_file(opts, fname, stdin, out, err, line_count=1): - env = builtins.__xonsh__.env - enc = env.get("XONSH_ENCODING") - enc_errors = env.get("XONSH_ENCODING_ERRORS") - read_size = 0 - file_size = fobj = None - if fname == "-": - f = stdin - elif os.path.isdir(fname): - print("cat: {}: Is a directory.".format(fname), file=err) - return True, line_count - elif not os.path.exists(fname): - print("cat: No such file or directory: {}".format(fname), file=err) - return True, line_count - else: - file_size = os.stat(fname).st_size - if file_size == 0: - file_size = None - fobj = open(fname, "rb") - f = xproc.NonBlockingFDReader(fobj.fileno(), timeout=0.1) - sep = os.linesep.encode(enc, enc_errors) - last_was_blank = False - while file_size is None or read_size < file_size: - try: - last_was_blank, line_count, read_size, endnow = _cat_line( - f, - sep, - last_was_blank, - line_count, - opts, - out, - enc, - enc_errors, - read_size, - ) - if endnow: - break - if last_was_blank: - time.sleep(1e-3) - except KeyboardInterrupt: - print("got except", flush=True, file=out) - break - except Exception as e: - print("xonsh:", e, flush=True, file=out) - pass - if fobj is not None: - fobj.close() - return False, line_count - - -def cat(args, stdin, stdout, stderr): - """A cat command for xonsh.""" - opts = _cat_parse_args(args) - if opts is None: - 
print(CAT_HELP_STR, file=stdout) - return 0 - - line_count = 1 - errors = False - if len(args) == 0: - args = ["-"] - for i in args: - o = _cat_single_file(opts, i, stdin, stdout, stderr, line_count) - if o is None: - return -1 - _e, line_count = o - errors = _e or errors - - return int(errors) - - -def _cat_parse_args(args): - out = { - "number_nonblank": False, - "number_all": False, - "squeeze_blank": False, - "show_ends": False, - } - if "--help" in args: - return - - arg_handler(args, out, "-b", "number_nonblank", True, "--number-nonblank") - arg_handler(args, out, "-n", "number_all", True, "--number") - arg_handler(args, out, "-E", "show_ends", True, "--show-ends") - arg_handler(args, out, "-s", "squeeze_blank", True, "--squeeze-blank") - arg_handler(args, out, "-T", "show_tabs", True, "--show-tabs") - - return out - - -CAT_HELP_STR = """This version of cat was written in Python for the xonsh project: http://xon.sh -Based on cat from GNU coreutils: http://www.gnu.org/software/coreutils/ - -Usage: cat [OPTION]... [FILE]... -Concatenate FILE(s), or standard input, to standard output. - - -b, --number-nonblank number nonempty output lines, overrides -n - -E, --show-ends display $ at end of each line - -n, --number number all output lines - -s, --squeeze-blank suppress repeated empty output lines - -T, --show-tabs display TAB characters as ^I - -u (ignored) - --help display this help and exit - -With no FILE, or when FILE is -, read standard input. - -Examples: - cat f - g Output f's contents, then standard input, then g's contents. - cat Copy standard input to standard output.""" - -# NOT IMPLEMENTED: -# -A, --show-all equivalent to -vET -# -e equivalent to -vE -# -t equivalent to -vT -# -v, --show-nonprinting use ^ and M- notation, except for LFD and TAB -# --version output version information and exit""" - - -def cat_main(args=None): - import sys - from xonsh.main import setup - - setup() - args = sys.argv if args is None else args - cat(args, sys.stdin, sys.stdout, sys.stderr) - - -if __name__ == "__main__": - cat_main() diff --git a/xonsh/xoreutils/echo.py b/xonsh/xoreutils/echo.py deleted file mode 100644 index c46a08f..0000000 --- a/xonsh/xoreutils/echo.py +++ /dev/null @@ -1,44 +0,0 @@ -"""Implements a simple echo command for xonsh.""" - - -def echo(args, stdin, stdout, stderr): - """A simple echo command.""" - opts = _echo_parse_args(args) - if opts is None: - return - if opts["help"]: - print(ECHO_HELP, file=stdout) - return 0 - ender = opts["end"] - args = map(str, args) - if opts["escapes"]: - args = map(lambda x: x.encode().decode("unicode_escape"), args) - print(*args, end=ender, file=stdout) - - -def _echo_parse_args(args): - out = {"escapes": False, "end": "\n", "help": False} - if "-e" in args: - args.remove("-e") - out["escapes"] = True - if "-E" in args: - args.remove("-E") - out["escapes"] = False - if "-n" in args: - args.remove("-n") - out["end"] = "" - if "-h" in args or "--help" in args: - out["help"] = True - return out - - -ECHO_HELP = """Usage: echo [OPTIONS]... [STRING]... -Echo the STRING(s) to standard output. 
- - -n do not include the trailing newline - -e enable interpretation of backslash escapes - -E disable interpretation of backslash escapes (default) - -h --help display this message and exit - -This version of echo was written in Python for the xonsh project: http://xon.sh -Based on echo from GNU coreutils: http://www.gnu.org/software/coreutils/""" diff --git a/xonsh/xoreutils/pwd.py b/xonsh/xoreutils/pwd.py deleted file mode 100644 index 4ed394e..0000000 --- a/xonsh/xoreutils/pwd.py +++ /dev/null @@ -1,28 +0,0 @@ -"""A pwd implementation for xonsh.""" -import os - - -def pwd(args, stdin, stdout, stderr): - """A pwd implementation""" - e = __xonsh__.env["PWD"] - if "-h" in args or "--help" in args: - print(PWD_HELP, file=stdout) - return 0 - if "-P" in args: - e = os.path.realpath(e) - print(e, file=stdout) - return 0 - - -PWD_HELP = """Usage: pwd [OPTION]... -Print the full filename of the current working directory. - - -P, --physical avoid all symlinks - --help display this help and exit - -This version of pwd was written in Python for the xonsh project: http://xon.sh -Based on pwd from GNU coreutils: http://www.gnu.org/software/coreutils/""" - - -# Not Implemented -# -L, --logical use PWD from environment, even if it contains symlinks diff --git a/xonsh/xoreutils/tee.py b/xonsh/xoreutils/tee.py deleted file mode 100644 index b9541c4..0000000 --- a/xonsh/xoreutils/tee.py +++ /dev/null @@ -1,59 +0,0 @@ -"""A tee implementation for xonsh.""" - - -def tee(args, stdin, stdout, stderr): - """A tee command for xonsh.""" - mode = "w" - if "-a" in args: - args.remove("-a") - mode = "a" - if "--append" in args: - args.remove("--append") - mode = "a" - if "--help" in args: - print(TEE_HELP, file=stdout) - return 0 - if stdin is None: - msg = "tee was not piped stdin, must have input stream to read from." - print(msg, file=stderr) - return 1 - - errors = False - files = [] - for i in args: - if i == "-": - files.append(stdout) - else: - try: - files.append(open(i, mode)) - except: - print("tee: failed to open {}".format(i), file=stderr) - errors = True - files.append(stdout) - - while True: - r = stdin.read(1024) - if r == "": - break - for i in files: - i.write(r) - for i in files: - if i != stdout: - i.close() - - return int(errors) - - -TEE_HELP = """This version of tee was written in Python for the xonsh project: http://xon.sh -Based on tee from GNU coreutils: http://www.gnu.org/software/coreutils/ - -Usage: tee [OPTION]... [FILE]... -Copy standard input to each FILE, and also to standard output. 
- - -a, --append append to the given FILEs, do not overwrite - --help display this help and exit - -If a FILE is -, copy again to standard output.""" - -# NOT IMPLEMENTED: -# -i, --ignore-interrupts ignore interrupt signals diff --git a/xonsh/xoreutils/tty.py b/xonsh/xoreutils/tty.py deleted file mode 100644 index 0640029..0000000 --- a/xonsh/xoreutils/tty.py +++ /dev/null @@ -1,45 +0,0 @@ -"""A tty implementation for xonsh.""" -import os -import sys - - -def tty(args, stdin, stdout, stderr): - """A tty command for xonsh.""" - if "--help" in args: - print(TTY_HELP, file=stdout) - return 0 - silent = False - for i in ("-s", "--silent", "--quiet"): - if i in args: - silent = True - args.remove(i) - if len(args) > 0: - if not silent: - for i in args: - print("tty: Invalid option: {}".format(i), file=stderr) - print("Try 'tty --help' for more information.", file=stderr) - return 2 - try: - fd = stdin.fileno() - except (AttributeError, ValueError): - fd = sys.stdin.fileno() - if not os.isatty(fd): - if not silent: - print("not a tty", file=stdout) - return 1 - if not silent: - try: - print(os.ttyname(fd), file=stdout) - except OSError: - return 3 - return 0 - - -TTY_HELP = """Usage: tty [OPTION]... -Print the file name of the terminal connected to standard input. - - -s, --silent, --quiet print nothing, only return an exit status - --help display this help and exit - -This version of tty was written in Python for the xonsh project: http://xon.sh -Based on tty from GNU coreutils: http://www.gnu.org/software/coreutils/""" diff --git a/xonsh/xoreutils/uptime.py b/xonsh/xoreutils/uptime.py deleted file mode 100644 index 2a66e9a..0000000 --- a/xonsh/xoreutils/uptime.py +++ /dev/null @@ -1,284 +0,0 @@ -""" -Provides a cross-platform way to figure out the system uptime. - -Should work on damned near any operating system you can realistically expect -to be asked to write Python code for. -If this module is invoked as a stand-alone script, it will print the current -uptime in a human-readable format, or display an error message if it can't, -to standard output. - -This file was forked from the uptime project: https://github.com/Cairnarvon/uptime -Copyright (c) 2012, Koen Crolla, All rights reserved. -""" -import os -import sys -import time -import ctypes -import struct - -import xonsh.platform as xp -import xonsh.lazyimps as xlimps -import xonsh.lazyasd as xl - - -_BOOTTIME = None - - -def _uptime_osx(): - """Returns the uptime on mac / darwin.""" - global _BOOTTIME - bt = xlimps.macutils.sysctlbyname(b"kern.boottime", return_str=False) - if len(bt) == 4: - bt = struct.unpack_from("@hh", bt) - elif len(bt) == 8: - bt = struct.unpack_from("@ii", bt) - elif len(bt) == 16: - bt = struct.unpack_from("@qq", bt) - else: - raise ValueError("length of boot time not understood: " + repr(bt)) - bt = bt[0] + bt[1] * 1e-6 - if bt == 0.0: - return None - _BOOTTIME = bt - return time.time() - bt - - -def _uptime_linux(): - """Returns uptime in seconds or None, on Linux.""" - # With procfs - try: - with open("/proc/uptime", "r") as f: - up = float(f.readline().split()[0]) - return up - except (IOError, ValueError): - pass - buf = ctypes.create_string_buffer(128) # 64 suffices on 32-bit, whatever.
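The Linux branch of the uptime module above prefers procfs: the first field of /proc/uptime is seconds since boot, and the ctypes sysinfo(2) call is only a fallback for when that read fails. A standalone sketch of the procfs path, assuming nothing beyond the standard library:

```python
def proc_uptime():
    """Seconds since boot from /proc/uptime, or None if unreadable."""
    try:
        with open("/proc/uptime") as f:
            # The file holds two floats: uptime and aggregate idle time.
            return float(f.readline().split()[0])
    except (OSError, ValueError, IndexError):
        return None


print(proc_uptime())  # e.g. 90210.42 on Linux, None elsewhere
```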
- if xp.LIBC.sysinfo(buf) < 0: - return None - up = struct.unpack_from("@l", buf.raw)[0] - if up < 0: - up = None - return up - - -def _boottime_linux(): - """A way to figure out the boot time directly on Linux.""" - global _BOOTTIME - try: - with open("/proc/stat", "r") as f: - for line in f: - if line.startswith("btime"): - _BOOTTIME = float(line.split()[1]) - return _BOOTTIME - except (IOError, IndexError): - return None - - -def _uptime_amiga(): - """Returns uptime in seconds or None, on AmigaOS.""" - global _BOOTTIME - try: - _BOOTTIME = os.stat("RAM:").st_ctime - return time.time() - _BOOTTIME - except (NameError, OSError): - return None - - -def _uptime_beos(): - """Returns uptime in seconds or None, on BeOS/Haiku.""" - if not hasattr(xp.LIBC, "system_time"): - return None - xp.LIBC.system_time.restype = ctypes.c_int64 - return xp.LIBC.system_time() / 1000000. - - -def _uptime_bsd(): - """Returns uptime in seconds or None, on BSD (including OS X).""" - global _BOOTTIME - if not hasattr(xp.LIBC, "sysctlbyname"): - # Not BSD. - return None - # Determine how much space we need for the response. - sz = ctypes.c_uint(0) - xp.LIBC.sysctlbyname("kern.boottime", None, ctypes.byref(sz), None, 0) - if sz.value != struct.calcsize("@LL"): - # Unexpected, let's give up. - return None - # For real now. - buf = ctypes.create_string_buffer(sz.value) - xp.LIBC.sysctlbyname("kern.boottime", buf, ctypes.byref(sz), None, 0) - sec, usec = struct.unpack_from("@LL", buf.raw) - # OS X disagrees about what that second value is. - if usec > 1000000: - usec = 0. - _BOOTTIME = sec + usec / 1000000. - up = time.time() - _BOOTTIME - if up < 0: - up = None - return up - - -def _uptime_minix(): - """Returns uptime in seconds or None, on MINIX.""" - try: - with open("/proc/uptime", "r") as f: - up = float(f.read()) - return up - except (IOError, ValueError): - return None - - -def _uptime_plan9(): - """Returns uptime in seconds or None, on Plan 9.""" - # Apparently Plan 9 only has Python 2.2, which I'm not prepared to - # support. Maybe some Linuxes implement /dev/time, though, someone was - # talking about it somewhere. - try: - # The time file holds one 32-bit number representing the sec- - # onds since start of epoch and three 64-bit numbers, repre- - # senting nanoseconds since start of epoch, clock ticks, and - # clock frequency. - # -- cons(3) - with open("/dev/time", "r") as f: - s, ns, ct, cf = f.read().split() - return float(ct) / float(cf) - except (IOError, ValueError): - return None - - -def _uptime_solaris(): - """Returns uptime in seconds or None, on Solaris.""" - global _BOOTTIME - try: - kstat = ctypes.CDLL("libkstat.so") - except (AttributeError, OSError): - return None - - # kstat doesn't have uptime, but it does have boot time. - # Unfortunately, getting at it isn't perfectly straightforward. - # First, let's pretend to be kstat.h - - # Constant - KSTAT_STRLEN = 31 # According to every kstat.h I could find. - - # Data structures - class anon_union(ctypes.Union): - # The ``value'' union in kstat_named_t actually has a bunch more - # members, but we're only using it for boot_time, so we only need - # the padding and the one we're actually using.
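The Darwin and BSD branches above both decode kern.boottime as a timeval, a seconds field plus a microseconds field, and derive uptime by subtracting it from the current time. A worked sketch of that decoding with a fabricated 16-byte blob (the values are made up for illustration):

```python
import struct
import time

# Pretend sysctl returned a 64-bit timeval: seconds and microseconds
# since the epoch, captured at boot.
blob = struct.pack("@qq", 1_700_000_000, 250_000)
sec, usec = struct.unpack_from("@qq", blob)
boottime = sec + usec * 1e-6
print(time.time() - boottime)  # uptime in seconds
```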
- _fields_ = [("c", ctypes.c_char * 16), ("time", ctypes.c_int)] - - class kstat_named_t(ctypes.Structure): - _fields_ = [ - ("name", ctypes.c_char * KSTAT_STRLEN), - ("data_type", ctypes.c_char), - ("value", anon_union), - ] - - # Function signatures - kstat.kstat_open.restype = ctypes.c_void_p - kstat.kstat_lookup.restype = ctypes.c_void_p - kstat.kstat_lookup.argtypes = [ - ctypes.c_void_p, - ctypes.c_char_p, - ctypes.c_int, - ctypes.c_char_p, - ] - kstat.kstat_read.restype = ctypes.c_int - kstat.kstat_read.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_void_p] - kstat.kstat_data_lookup.restype = ctypes.POINTER(kstat_named_t) - kstat.kstat_data_lookup.argtypes = [ctypes.c_void_p, ctypes.c_char_p] - - # Now, let's do something useful. - # Initialise kstat control structure. - kc = kstat.kstat_open() - if not kc: - return None - # We're looking for unix:0:system_misc:boot_time. - ksp = kstat.kstat_lookup(kc, "unix", 0, "system_misc") - if ksp and kstat.kstat_read(kc, ksp, None) != -1: - data = kstat.kstat_data_lookup(ksp, "boot_time") - if data: - _BOOTTIME = data.contents.value.time - # Clean-up. - kstat.kstat_close(kc) - if _BOOTTIME is not None: - return time.time() - _BOOTTIME - return None - - -def _uptime_syllable(): - """Returns uptime in seconds or None, on Syllable.""" - global _BOOTTIME - try: - _BOOTTIME = os.stat("/dev/pty/mst/pty0").st_mtime - return time.time() - _BOOTTIME - except (NameError, OSError): - return None - - -def _uptime_windows(): - """ - Returns uptime in seconds or None, on Windows. Warning: may return - incorrect answers after 49.7 days on versions older than Vista. - """ - if hasattr(xp.LIBC, "GetTickCount64"): - # Vista/Server 2008 or later. - xp.LIBC.GetTickCount64.restype = ctypes.c_uint64 - return xp.LIBC.GetTickCount64() / 1000. - if hasattr(xp.LIBC, "GetTickCount"): - # WinCE and Win2k or later; gives wrong answers after 49.7 days. - xp.LIBC.GetTickCount.restype = ctypes.c_uint32 - return xp.LIBC.GetTickCount() / 1000. 
- return None - - -@xl.lazyobject -def _UPTIME_FUNCS(): - return { - "amiga": _uptime_amiga, - "aros12": _uptime_amiga, - "beos5": _uptime_beos, - "cygwin": _uptime_linux, - "darwin": _uptime_osx, - "haiku1": _uptime_beos, - "linux": _uptime_linux, - "linux-armv71": _uptime_linux, - "linux2": _uptime_linux, - "minix3": _uptime_minix, - "sunos5": _uptime_solaris, - "syllable": _uptime_syllable, - "win32": _uptime_windows, - "wince": _uptime_windows, - } - - -def uptime(): - """Returns uptime in seconds if even remotely possible, or None if not.""" - if _BOOTTIME is not None: - return time.time() - _BOOTTIME - up = _UPTIME_FUNCS.get(sys.platform, _uptime_bsd)() - if up is None: - up = ( - _uptime_bsd() - or _uptime_plan9() - or _uptime_linux() - or _uptime_windows() - or _uptime_solaris() - or _uptime_beos() - or _uptime_amiga() - or _uptime_syllable() - or _uptime_osx() - ) - return up - - -def boottime(): - """Returns boot time if remotely possible, or None if not.""" - global _BOOTTIME - if _BOOTTIME is None: - up = uptime() - if up is None: - return None - _BOOTTIME = time.time() - up - return _BOOTTIME diff --git a/xonsh/xoreutils/util.py b/xonsh/xoreutils/util.py deleted file mode 100644 index 67abd44..0000000 --- a/xonsh/xoreutils/util.py +++ /dev/null @@ -1,19 +0,0 @@ -"""Assorted utilities for xonsh core utils.""" - - -def arg_handler(args, out, short, key, val, long=None): - """A simple argument handler for xoreutils.""" - if short in args: - args.remove(short) - if isinstance(key, (list, tuple)): - for k in key: - out[k] = val - else: - out[key] = val - if long is not None and long in args: - args.remove(long) - if isinstance(key, (list, tuple)): - for k in key: - out[k] = val - else: - out[key] = val diff --git a/xonsh/xoreutils/which.py b/xonsh/xoreutils/which.py deleted file mode 100644 index d92714a..0000000 --- a/xonsh/xoreutils/which.py +++ /dev/null @@ -1,193 +0,0 @@ -"""Implements the which xoreutil.""" -import os -import argparse -import builtins -import functools - -from xonsh.xoreutils import _which -import xonsh.platform as xp -import xonsh.proc as xproc - - -@functools.lru_cache() -def _which_create_parser(): - desc = "Parses arguments to which wrapper" - parser = argparse.ArgumentParser("which", description=desc) - parser.add_argument( - "args", type=str, nargs="+", help="The executables or aliases to search for" - ) - parser.add_argument( - "-a", - "--all", - action="store_true", - dest="all", - help="Show all matches in globals, xonsh.aliases, $PATH", - ) - parser.add_argument( - "-s", - "--skip-alias", - action="store_true", - help="Do not search in xonsh.aliases", - dest="skip", - ) - parser.add_argument( - "-V", - "--version", - action="version", - version="{}".format(_which.__version__), - help="Display the version of the python which module " "used by xonsh", - ) - parser.add_argument( - "-v", - "--verbose", - action="store_true", - dest="verbose", - help="Print out how matches were located and show " "near misses on stderr", - ) - parser.add_argument( - "-p", - "--plain", - action="store_true", - dest="plain", - help="Do not display alias expansions or location of " - "where binaries are found. This is the " - "default behavior, but the option can be used to " - "override the --verbose option", - ) - parser.add_argument("--very-small-rocks", action=AWitchAWitch) - if xp.ON_WINDOWS: - parser.add_argument( - "-e", - "--exts", - nargs="*", - type=str, - help="Specify a list of extensions to use instead " - "of the standard list for this system.
This can " - "effectively be used as an optimization to, for " - 'example, avoid stat\'s of "foo.vbs" when ' - 'searching for "foo" and you know it is not a ' - 'VisualBasic script but ".vbs" is on PATHEXT. ' - "This option is only supported on Windows", - dest="exts", - ) - return parser - - -def print_global_object(arg, stdout): - """Print the object.""" - obj = builtins.__xonsh__.ctx.get(arg) - print("global object of {}".format(type(obj)), file=stdout) - - -def print_path(abs_name, from_where, stdout, verbose=False, captured=False): - """Print the name and path of the command.""" - if xp.ON_WINDOWS: - # Use list dir to get correct case for the filename - # i.e. windows is case insensitive but case preserving - p, f = os.path.split(abs_name) - f = next(s.name for s in xp.scandir(p) if s.name.lower() == f.lower()) - abs_name = os.path.join(p, f) - if builtins.__xonsh__.env.get("FORCE_POSIX_PATHS", False): - abs_name.replace(os.sep, os.altsep) - if verbose: - print("{} ({})".format(abs_name, from_where), file=stdout) - else: - end = "" if captured else "\n" - print(abs_name, end=end, file=stdout) - - -def print_alias(arg, stdout, verbose=False): - """Print the alias.""" - if not verbose: - if not callable(builtins.aliases[arg]): - print(" ".join(builtins.aliases[arg]), file=stdout) - else: - print(arg, file=stdout) - else: - print("aliases['{}'] = {}".format(arg, builtins.aliases[arg]), file=stdout) - if callable(builtins.aliases[arg]): - builtins.__xonsh__.superhelp(builtins.aliases[arg]) - - -def which(args, stdin=None, stdout=None, stderr=None, spec=None): - """ - Checks if each arguments is a xonsh aliases, then if it's an executable, - then finally return an error code equal to the number of misses. - If '-a' flag is passed, run both to return both `xonsh` match and - `which` match. 
- """ - parser = _which_create_parser() - if len(args) == 0: - parser.print_usage(file=stderr) - return -1 - - pargs = parser.parse_args(args) - verbose = pargs.verbose or pargs.all - if spec is not None: - captured = spec.captured in xproc.STDOUT_CAPTURE_KINDS - else: - captured = False - if pargs.plain: - verbose = False - if xp.ON_WINDOWS: - if pargs.exts: - exts = pargs.exts - else: - exts = builtins.__xonsh__.env["PATHEXT"] - else: - exts = None - failures = [] - for arg in pargs.args: - nmatches = 0 - if pargs.all and arg in builtins.__xonsh__.ctx: - print_global_object(arg, stdout) - nmatches += 1 - if arg in builtins.aliases and not pargs.skip: - print_alias(arg, stdout, verbose) - nmatches += 1 - if not pargs.all: - continue - # which.whichgen gives the nicest 'verbose' output if PATH is taken - # from os.environ so we temporarily override it with - # __xosnh_env__['PATH'] - original_os_path = xp.os_environ["PATH"] - xp.os_environ["PATH"] = builtins.__xonsh__.env.detype()["PATH"] - matches = _which.whichgen(arg, exts=exts, verbose=verbose) - for abs_name, from_where in matches: - print_path(abs_name, from_where, stdout, verbose, captured) - nmatches += 1 - if not pargs.all: - break - xp.os_environ["PATH"] = original_os_path - if not nmatches: - failures.append(arg) - if len(failures) == 0: - return 0 - else: - print("{} not in ".format(", ".join(failures)), file=stderr, end="") - if pargs.all: - print("globals or ", file=stderr, end="") - print("$PATH", file=stderr, end="") - if not pargs.skip: - print(" or xonsh.builtins.aliases", file=stderr, end="") - print("", file=stderr, end="\n") - return len(failures) - - -class AWitchAWitch(argparse.Action): - """The Ducstring, the mother of all ducs.""" - - SUPPRESS = "==SUPPRESS==" - - def __init__( - self, option_strings, version=None, dest=SUPPRESS, default=SUPPRESS, **kwargs - ): - super().__init__( - option_strings=option_strings, dest=dest, default=default, nargs=0, **kwargs - ) - - def __call__(self, parser, namespace, values, option_string=None): - import webbrowser - - webbrowser.open("https://github.com/xonsh/xonsh/commit/f49b400") - parser.exit() diff --git a/xonsh/xoreutils/yes.py b/xonsh/xoreutils/yes.py deleted file mode 100644 index 6dad709..0000000 --- a/xonsh/xoreutils/yes.py +++ /dev/null @@ -1,25 +0,0 @@ -"""An implementation of yes for xonsh.""" - - -def yes(args, stdin, stdout, stderr): - """A yes command.""" - if "--help" in args: - print(YES_HELP, file=stdout) - return 0 - - to_print = ["y"] if len(args) == 0 else [str(i) for i in args] - - while True: - print(*to_print, file=stdout) - - return 0 - - -YES_HELP = """Usage: yes [STRING]... - or: yes OPTION -Repeatedly output a line with all specified STRING(s), or 'y'. - - --help display this help and exit - -This version of yes was written in Python for the xonsh project: http://xon.sh -Based on yes from GNU coreutils: http://www.gnu.org/software/coreutils/"""