From ccbc20ba50150ba7b7833c6b4e0b44243f4b0660 Mon Sep 17 00:00:00 2001 From: SakuraSa233 Date: Mon, 26 Nov 2018 19:39:49 +0800 Subject: [PATCH] init repository --- .gitignore | 41 + .travis.yml | 21 + CONTRIBUTING.md | 29 + LICENSE | 202 ++ MANIFEST.in | 3 + README.md | 109 + README.rst | 113 + apiconfig.py | 47 + asyncmgr.py | 101 + auto_block.py | 307 +++ auto_thread.py | 196 ++ config.json | 24 + configloader.py | 16 + db_transfer.py | 891 ++++++++ debian/changelog | 5 + debian/compat | 1 + debian/config.json | 11 + debian/control | 19 + debian/copyright | 30 + debian/docs | 2 + debian/init.d | 149 ++ debian/install | 1 + debian/rules | 5 + debian/shadowsocks.default | 12 + debian/shadowsocks.manpages | 2 + debian/source/format | 1 + debian/sslocal.1 | 59 + debian/ssserver.1 | 59 + detect.html | 1 + gnupg/__init__.py | 47 + gnupg/_ansistrm.py | 172 ++ gnupg/_logger.py | 99 + gnupg/_meta.py | 1040 ++++++++++ gnupg/_parsers.py | 1551 ++++++++++++++ gnupg/_trust.py | 103 + gnupg/_util.py | 795 +++++++ gnupg/_version.py | 11 + gnupg/copyleft.py | 749 +++++++ gnupg/gnupg.py | 1080 ++++++++++ importloader.py | 26 + logrun.sh | 6 + mudb.json | 2 + requirements.txt | 6 + run.sh | 6 + server.py | 91 + server_pool.py | 594 ++++++ setup.py | 39 + shadowsocks.sql | 24 + shadowsocks/__init__.py | 18 + shadowsocks/asyncdns.py | 554 +++++ shadowsocks/common.py | 512 +++++ shadowsocks/crypto/__init__.py | 18 + shadowsocks/crypto/aead.py | 340 +++ shadowsocks/crypto/hkdf.py | 98 + shadowsocks/crypto/openssl.py | 461 +++++ shadowsocks/crypto/rc4_md5.py | 52 + shadowsocks/crypto/sodium.py | 453 ++++ shadowsocks/crypto/table.py | 189 ++ shadowsocks/crypto/util.py | 168 ++ shadowsocks/daemon.py | 208 ++ shadowsocks/encrypt.py | 275 +++ shadowsocks/encrypt_test.py | 53 + shadowsocks/eventloop.py | 263 +++ shadowsocks/local.py | 89 + shadowsocks/logrun.sh | 6 + shadowsocks/lru_cache.py | 184 ++ shadowsocks/manager.py | 291 +++ shadowsocks/obfs.py | 119 ++ 
shadowsocks/obfsplugin/__init__.py | 18 + shadowsocks/obfsplugin/auth.py | 1234 +++++++++++ shadowsocks/obfsplugin/auth_chain.py | 855 ++++++++ shadowsocks/obfsplugin/http_simple.py | 351 ++++ shadowsocks/obfsplugin/obfs_tls.py | 312 +++ shadowsocks/obfsplugin/plain.py | 103 + shadowsocks/obfsplugin/simple_obfs_http.py | 171 ++ shadowsocks/obfsplugin/simple_obfs_tls.py | 295 +++ shadowsocks/obfsplugin/verify.py | 360 ++++ shadowsocks/ordereddict.py | 218 ++ shadowsocks/run.sh | 6 + shadowsocks/server.py | 235 +++ shadowsocks/shell.py | 482 +++++ shadowsocks/stop.sh | 3 + shadowsocks/tail.sh | 3 + shadowsocks/tcprelay.py | 2170 ++++++++++++++++++++ shadowsocks/udprelay.py | 1050 ++++++++++ shadowsocks/version.py | 19 + speedtest/.gitignore | 27 + speedtest/.travis.yml | 37 + speedtest/CONTRIBUTING.md | 39 + speedtest/LICENSE | 202 ++ speedtest/MANIFEST.in | 2 + speedtest/README.rst | 133 ++ speedtest/__init__.py | 0 speedtest/setup.cfg | 2 + speedtest/setup.py | 94 + speedtest/speedtest-cli.1 | 118 ++ speedtest/speedtest.py | 1431 +++++++++++++ speedtest/speedtest_cli.py | 34 + speedtest/tox.ini | 21 + speedtest_thread.py | 194 ++ stop.sh | 3 + switchrule.py | 28 + tail.sh | 3 + tests/aes-cfb1.json | 10 + tests/aes-cfb8.json | 10 + tests/aes-ctr.json | 10 + tests/aes.json | 10 + tests/assert.sh | 148 ++ tests/chacha20.json | 10 + tests/client-multi-server-ip.json | 10 + tests/coverage_server.py | 45 + tests/fastopen.json | 10 + tests/ipv6-client-side.json | 10 + tests/ipv6.json | 10 + tests/jenkins.sh | 82 + tests/libsodium/install.sh | 10 + tests/nose_plugin.py | 43 + tests/rc4-md5.json | 10 + tests/salsa20-ctr.json | 10 + tests/salsa20.json | 10 + tests/server-multi-passwd-client-side.json | 8 + tests/server-multi-passwd-table.json | 19 + tests/server-multi-passwd.json | 17 + tests/server-multi-ports.json | 8 + tests/setup_tc.sh | 18 + tests/socksify/install.sh | 10 + tests/socksify/socks.conf | 5 + tests/table.json | 10 + tests/test.py | 158 ++ 
tests/test_command.sh | 32 + tests/test_daemon.sh | 43 + tests/test_large_file.sh | 24 + tests/test_udp_src.py | 83 + tests/test_udp_src.sh | 23 + tests/workers.json | 10 + utils/README.md | 9 + utils/autoban.py | 53 + utils/fail2ban/shadowsocks.conf | 5 + web_transfer.py | 693 +++++++ webapi_utils.py | 69 + 140 files changed, 25247 insertions(+) create mode 100644 .gitignore create mode 100644 .travis.yml create mode 100644 CONTRIBUTING.md create mode 100644 LICENSE create mode 100644 MANIFEST.in create mode 100644 README.md create mode 100644 README.rst create mode 100644 apiconfig.py create mode 100644 asyncmgr.py create mode 100644 auto_block.py create mode 100644 auto_thread.py create mode 100644 config.json create mode 100644 configloader.py create mode 100644 db_transfer.py create mode 100644 debian/changelog create mode 100644 debian/compat create mode 100644 debian/config.json create mode 100644 debian/control create mode 100644 debian/copyright create mode 100644 debian/docs create mode 100644 debian/init.d create mode 100644 debian/install create mode 100644 debian/rules create mode 100644 debian/shadowsocks.default create mode 100644 debian/shadowsocks.manpages create mode 100644 debian/source/format create mode 100644 debian/sslocal.1 create mode 100644 debian/ssserver.1 create mode 100644 detect.html create mode 100644 gnupg/__init__.py create mode 100644 gnupg/_ansistrm.py create mode 100644 gnupg/_logger.py create mode 100644 gnupg/_meta.py create mode 100644 gnupg/_parsers.py create mode 100644 gnupg/_trust.py create mode 100644 gnupg/_util.py create mode 100644 gnupg/_version.py create mode 100644 gnupg/copyleft.py create mode 100644 gnupg/gnupg.py create mode 100644 importloader.py create mode 100644 logrun.sh create mode 100644 mudb.json create mode 100644 requirements.txt create mode 100644 run.sh create mode 100644 server.py create mode 100644 server_pool.py create mode 100644 setup.py create mode 100644 shadowsocks.sql create mode 100644 
shadowsocks/__init__.py create mode 100644 shadowsocks/asyncdns.py create mode 100644 shadowsocks/common.py create mode 100644 shadowsocks/crypto/__init__.py create mode 100644 shadowsocks/crypto/aead.py create mode 100644 shadowsocks/crypto/hkdf.py create mode 100644 shadowsocks/crypto/openssl.py create mode 100644 shadowsocks/crypto/rc4_md5.py create mode 100644 shadowsocks/crypto/sodium.py create mode 100644 shadowsocks/crypto/table.py create mode 100644 shadowsocks/crypto/util.py create mode 100644 shadowsocks/daemon.py create mode 100644 shadowsocks/encrypt.py create mode 100644 shadowsocks/encrypt_test.py create mode 100644 shadowsocks/eventloop.py create mode 100644 shadowsocks/local.py create mode 100644 shadowsocks/logrun.sh create mode 100644 shadowsocks/lru_cache.py create mode 100644 shadowsocks/manager.py create mode 100644 shadowsocks/obfs.py create mode 100644 shadowsocks/obfsplugin/__init__.py create mode 100644 shadowsocks/obfsplugin/auth.py create mode 100644 shadowsocks/obfsplugin/auth_chain.py create mode 100644 shadowsocks/obfsplugin/http_simple.py create mode 100644 shadowsocks/obfsplugin/obfs_tls.py create mode 100644 shadowsocks/obfsplugin/plain.py create mode 100644 shadowsocks/obfsplugin/simple_obfs_http.py create mode 100644 shadowsocks/obfsplugin/simple_obfs_tls.py create mode 100644 shadowsocks/obfsplugin/verify.py create mode 100644 shadowsocks/ordereddict.py create mode 100644 shadowsocks/run.sh create mode 100644 shadowsocks/server.py create mode 100644 shadowsocks/shell.py create mode 100644 shadowsocks/stop.sh create mode 100644 shadowsocks/tail.sh create mode 100644 shadowsocks/tcprelay.py create mode 100644 shadowsocks/udprelay.py create mode 100644 shadowsocks/version.py create mode 100644 speedtest/.gitignore create mode 100644 speedtest/.travis.yml create mode 100644 speedtest/CONTRIBUTING.md create mode 100644 speedtest/LICENSE create mode 100644 speedtest/MANIFEST.in create mode 100644 speedtest/README.rst create mode 100644 
speedtest/__init__.py create mode 100644 speedtest/setup.cfg create mode 100644 speedtest/setup.py create mode 100644 speedtest/speedtest-cli.1 create mode 100644 speedtest/speedtest.py create mode 100644 speedtest/speedtest_cli.py create mode 100644 speedtest/tox.ini create mode 100644 speedtest_thread.py create mode 100644 stop.sh create mode 100644 switchrule.py create mode 100644 tail.sh create mode 100644 tests/aes-cfb1.json create mode 100644 tests/aes-cfb8.json create mode 100644 tests/aes-ctr.json create mode 100644 tests/aes.json create mode 100644 tests/assert.sh create mode 100644 tests/chacha20.json create mode 100644 tests/client-multi-server-ip.json create mode 100644 tests/coverage_server.py create mode 100644 tests/fastopen.json create mode 100644 tests/ipv6-client-side.json create mode 100644 tests/ipv6.json create mode 100644 tests/jenkins.sh create mode 100644 tests/libsodium/install.sh create mode 100644 tests/nose_plugin.py create mode 100644 tests/rc4-md5.json create mode 100644 tests/salsa20-ctr.json create mode 100644 tests/salsa20.json create mode 100644 tests/server-multi-passwd-client-side.json create mode 100644 tests/server-multi-passwd-table.json create mode 100644 tests/server-multi-passwd.json create mode 100644 tests/server-multi-ports.json create mode 100644 tests/setup_tc.sh create mode 100644 tests/socksify/install.sh create mode 100644 tests/socksify/socks.conf create mode 100644 tests/table.json create mode 100644 tests/test.py create mode 100644 tests/test_command.sh create mode 100644 tests/test_daemon.sh create mode 100644 tests/test_large_file.sh create mode 100644 tests/test_udp_src.py create mode 100644 tests/test_udp_src.sh create mode 100644 tests/workers.json create mode 100644 utils/README.md create mode 100644 utils/autoban.py create mode 100644 utils/fail2ban/shadowsocks.conf create mode 100644 web_transfer.py create mode 100644 webapi_utils.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 
0000000..ac8052d --- /dev/null +++ b/.gitignore @@ -0,0 +1,41 @@ +*.py[co] + +ca.pem +client-cert.pem +client-key.pem +mysql.zip +ssserver.log +ssshell.asc +user-config.json +userapiconfig.py +user-detect.html + +# Packages +*.egg +*.egg-info +dist +build +eggs +parts +bin +var +sdist +develop-eggs +.installed.cfg + +# Installer logs +pip-log.txt + +# Unit test / coverage reports +htmlcov +.coverage* +.tox + +#Translations +*.mo + +#Mr Developer +.mr.developer.cfg + +.DS_Store +.idea diff --git a/.travis.yml b/.travis.yml new file mode 100644 index 0000000..014fa07 --- /dev/null +++ b/.travis.yml @@ -0,0 +1,21 @@ +language: python +python: + - 2.6 + - 2.7 + - 3.3 + - 3.4 +cache: + directories: + - dante-1.4.0 +before_install: + - sudo apt-get update -qq + - sudo apt-get install -qq build-essential dnsutils iproute nginx bc + - sudo dd if=/dev/urandom of=/usr/share/nginx/www/file bs=1M count=10 + - sudo sh -c "echo '127.0.0.1 localhost' > /etc/hosts" + - sudo service nginx restart + - pip install pep8 pyflakes nose coverage PySocks cymysql + - sudo tests/socksify/install.sh + - sudo tests/libsodium/install.sh + - sudo tests/setup_tc.sh +script: + - tests/jenkins.sh diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..fbdb9c1 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,29 @@ +How to Contribute +================= + +Pull Requests +------------- + +1. Pull requests are welcome. If you would like to add a large feature +or make a significant change, make sure to open an issue to discuss with +people first. +2. Follow PEP8. +3. Make sure to pass the unit tests. Write unit tests for new modules if +needed. + +Issues +------ + +1. Only bugs and feature requests are accepted here. +2. We'll only work on important features. If the feature you're asking only +benefits a few people, you'd better implement the feature yourself and send us +a pull request, or ask some of your friends to do so. +3. 
We don't answer questions of any other types here. Since very few people +are watching the issue tracker here, you'll probably get no help from here. +Read [Troubleshooting] and get help from forums or [mailing lists]. +4. Issues in languages other than English will be Google translated into English +later. + + +[Troubleshooting]: https://github.com/clowwindy/shadowsocks/wiki/Troubleshooting +[mailing lists]: https://groups.google.com/forum/#!forum/shadowsocks diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/MANIFEST.in b/MANIFEST.in new file mode 100644 index 0000000..1882dd7 --- /dev/null +++ b/MANIFEST.in @@ -0,0 +1,3 @@ +recursive-include shadowsocks *.py +include README.rst +include LICENSE diff --git a/README.md b/README.md new file mode 100644 index 0000000..d3d2e7b --- /dev/null +++ b/README.md @@ -0,0 +1,109 @@ +shadowsocks +=========== + +[![PyPI version]][PyPI] +[![Build Status]][Travis CI] +[![Coverage Status]][Coverage] + +A fast tunnel proxy that helps you bypass firewalls. + +Server +------ + +### Attention +**Do not** believe any word in this README, please read the wiki. + +### Install + +Debian / Ubuntu: + + apt-get install python-pip + pip install shadowsocks + +CentOS: + + yum install python-setuptools && easy_install pip + pip install shadowsocks + +Windows: + +See [Install Server on Windows] + +### Usage + + ssserver -p 443 -k password -m aes-256-cfb + +To run in the background: + + sudo ssserver -p 443 -k password -m aes-256-cfb --user nobody -d start + +To stop: + + sudo ssserver -d stop + +To check the log: + + sudo less /var/log/shadowsocks.log + +Check all the options via `-h`. You can also use a [Configuration] file +instead. + +Client +------ + +* [Windows] / [OS X] +* [Android] / [iOS] +* [OpenWRT] + +Use GUI clients on your local PC/phones. Check the README of your client +for more information. 
+ +Documentation +------------- + +You can find all the documentation in the [Wiki]. + +License +------- + +Copyright 2015 clowwindy + +Licensed under the Apache License, Version 2.0 (the "License"); you may +not use this file except in compliance with the License. You may obtain +a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +License for the specific language governing permissions and limitations +under the License. + +Bugs and Issues +---------------- + +* [Troubleshooting] +* [Issue Tracker] +* [Mailing list] + + + +[Android]: https://github.com/shadowsocks/shadowsocks-android +[Build Status]: https://travis-ci.org/falseen/shadowsocks.svg?branch=manyuser-travis +[Configuration]: https://github.com/shadowsocks/shadowsocks/wiki/Configuration-via-Config-File +[Coverage Status]: https://jenkins.shadowvpn.org/result/shadowsocks +[Coverage]: https://jenkins.shadowvpn.org/job/Shadowsocks/ws/PYENV/py34/label/linux/htmlcov/index.html +[Debian sid]: https://packages.debian.org/unstable/python/shadowsocks +[iOS]: https://github.com/shadowsocks/shadowsocks-iOS/wiki/Help +[Issue Tracker]: https://github.com/shadowsocks/shadowsocks/issues?state=open +[Install Server on Windows]: https://github.com/shadowsocks/shadowsocks/wiki/Install-Shadowsocks-Server-on-Windows +[Mailing list]: https://groups.google.com/group/shadowsocks +[OpenWRT]: https://github.com/shadowsocks/openwrt-shadowsocks +[OS X]: https://github.com/shadowsocks/shadowsocks-iOS/wiki/Shadowsocks-for-OSX-Help +[PyPI]: https://pypi.python.org/pypi/shadowsocks +[PyPI version]: https://img.shields.io/pypi/v/shadowsocks.svg?style=flat +[Travis CI]: https://travis-ci.org/falseen/shadowsocks +[Troubleshooting]: https://github.com/shadowsocks/shadowsocks/wiki/Troubleshooting 
+[Wiki]: https://github.com/shadowsocks/shadowsocks/wiki +[Windows]: https://github.com/shadowsocks/shadowsocks-csharp diff --git a/README.rst b/README.rst new file mode 100644 index 0000000..bf2a3ec --- /dev/null +++ b/README.rst @@ -0,0 +1,113 @@ +shadowsocks +=========== + +|PyPI version| |Build Status| |Coverage Status| + +A fast tunnel proxy that helps you bypass firewalls. + +Server +------ + +Install +~~~~~~~ + +Debian / Ubuntu: + +:: + + apt-get install python-pip + pip install shadowsocks + +CentOS: + +:: + + yum install python-setuptools && easy_install pip + pip install shadowsocks + +Windows: + +See `Install Server on +Windows `__ + +Usage +~~~~~ + +:: + + ssserver -p 443 -k password -m rc4-md5 + +To run in the background: + +:: + + sudo ssserver -p 443 -k password -m rc4-md5 --user nobody -d start + +To stop: + +:: + + sudo ssserver -d stop + +To check the log: + +:: + + sudo less /var/log/shadowsocks.log + +Check all the options via ``-h``. You can also use a +`Configuration `__ +file instead. + +Client +------ + +- `Windows `__ + / `OS + X `__ +- `Android `__ + / `iOS `__ +- `OpenWRT `__ + +Use GUI clients on your local PC/phones. Check the README of your client +for more information. + +Documentation +------------- + +You can find all the documentation in the +`Wiki `__. + +License +------- + +Copyright 2015 clowwindy + +Licensed under the Apache License, Version 2.0 (the "License"); you may +not use this file except in compliance with the License. You may obtain +a copy of the License at + +:: + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +Bugs and Issues +--------------- + +- `Troubleshooting `__ +- `Issue + Tracker `__ +- `Mailing list `__ + +.. |PyPI version| image:: https://img.shields.io/pypi/v/shadowsocks.svg?style=flat + :target: https://pypi.python.org/pypi/shadowsocks +.. |Build Status| image:: https://img.shields.io/travis/shadowsocks/shadowsocks/master.svg?style=flat + :target: https://travis-ci.org/shadowsocks/shadowsocks +.. |Coverage Status| image:: https://jenkins.shadowvpn.org/result/shadowsocks + :target: https://jenkins.shadowvpn.org/job/Shadowsocks/ws/PYENV/py34/label/linux/htmlcov/index.html diff --git a/apiconfig.py b/apiconfig.py new file mode 100644 index 0000000..d9e2368 --- /dev/null +++ b/apiconfig.py @@ -0,0 +1,47 @@ +# Config +NODE_ID = 1 + + +# hour,set 0 to disable +SPEEDTEST = 6 +CLOUDSAFE = 1 +ANTISSATTACK = 0 +AUTOEXEC = 0 + +MU_SUFFIX = 'zhaoj.in' +MU_REGEX = '%5m%id.%suffix' + +SERVER_PUB_ADDR = '127.0.0.1' # mujson_mgr need this to generate ssr link +API_INTERFACE = 'modwebapi' # glzjinmod, modwebapi + +WEBAPI_URL = 'https://zhaoj.in' +WEBAPI_TOKEN = 'glzjin' + +# mudb +MUDB_FILE = 'mudb.json' + +# Mysql +MYSQL_HOST = '127.0.0.1' +MYSQL_PORT = 3306 +MYSQL_USER = 'ss' +MYSQL_PASS = 'ss' +MYSQL_DB = 'shadowsocks' + +MYSQL_SSL_ENABLE = 0 +MYSQL_SSL_CA = '' +MYSQL_SSL_CERT = '' +MYSQL_SSL_KEY = '' + +# API +API_HOST = '127.0.0.1' +API_PORT = 80 +API_PATH = '/mu/v2/' +API_TOKEN = 'abcdef' +API_UPDATE_TIME = 60 + +# Manager (ignore this) +MANAGE_PASS = 'ss233333333' +# if you want manage in other server you should set this value to global ip +MANAGE_BIND_IP = '127.0.0.1' +# make sure this port is idle +MANAGE_PORT = 23333 diff --git a/asyncmgr.py b/asyncmgr.py new file mode 100644 index 0000000..a3ed392 --- /dev/null +++ b/asyncmgr.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright (c) 2014 clowwindy +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files 
(the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import time +import os +import socket +import struct +import re +import logging +from shadowsocks import common +from shadowsocks import lru_cache +from shadowsocks import eventloop +import server_pool +import Config + + +class ServerMgr(object): + + def __init__(self): + self._loop = None + self._request_id = 1 + self._hosts = {} + self._hostname_status = {} + self._hostname_to_cb = {} + self._cb_to_hostname = {} + self._last_time = time.time() + self._sock = None + self._servers = None + + def add_to_loop(self, loop): + if self._loop: + raise Exception('already add to loop') + self._loop = loop + # TODO when dns server is IPv6 + self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, + socket.SOL_UDP) + self._sock.bind((Config.MANAGE_BIND_IP, Config.MANAGE_PORT)) + self._sock.setblocking(False) + loop.add(self._sock, eventloop.POLL_IN, self) + + def _handle_data(self, sock): + data, addr = sock.recvfrom(128) + # manage pwd:port:passwd:action + args = data.split(':') + if len(args) < 4: + return + if args[0] == Config.MANAGE_PASS: + if 
args[3] == '0': + server_pool.ServerPool.get_instance().cb_del_server(args[1]) + elif args[3] == '1': + server_pool.ServerPool.get_instance( + ).new_server(args[1], args[2]) + + def handle_event(self, sock, fd, event): + if sock != self._sock: + return + if event & eventloop.POLL_ERR: + logging.error('mgr socket err') + self._loop.remove(self._sock) + self._sock.close() + # TODO when dns server is IPv6 + self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, + socket.SOL_UDP) + self._sock.setblocking(False) + self._loop.add(self._sock, eventloop.POLL_IN, self) + else: + self._handle_data(sock) + + def close(self): + if self._sock: + if self._loop: + self._loop.remove(self._sock) + self._sock.close() + self._sock = None + + +def test(): + pass + +if __name__ == '__main__': + test() diff --git a/auto_block.py b/auto_block.py new file mode 100644 index 0000000..cee677b --- /dev/null +++ b/auto_block.py @@ -0,0 +1,307 @@ +#!/usr/bin/python +# -*- coding: UTF-8 -*- + +import logging +import time +import sys +import os +import configloader +import importloader +import socket +import re +import platform +import fcntl +from shadowsocks import common, shell + + +class AutoBlock(object): + + def __init__(self): + import threading + self.event = threading.Event() + self.start_line = self.file_len("/etc/hosts.deny") + self.has_stopped = False + + def get_ip(self, text): + if common.match_ipv4_address(text) is not None: + return common.match_ipv4_address(text) + else: + if common.match_ipv6_address(text) is not None: + return common.match_ipv6_address(text) + return None + + def file_len(self, fname): + return sum(1 for line in open(fname)) + + def auto_block_thread(self): + global webapi + server_ip = socket.gethostbyname( + configloader.get_config().MYSQL_HOST) + + if configloader.get_config().API_INTERFACE == 'modwebapi': + # 读取节点IP + # SELECT * FROM `ss_node` where `node_ip` != '' + node_ip_list = [] + data = webapi.getApi('nodes') + for node in data: + temp_list = 
node['node_ip'].split(',') + node_ip_list.append(temp_list[0]) + else: + import cymysql + if configloader.get_config().MYSQL_SSL_ENABLE == 1: + conn = cymysql.connect( + host=configloader.get_config().MYSQL_HOST, + port=configloader.get_config().MYSQL_PORT, + user=configloader.get_config().MYSQL_USER, + passwd=configloader.get_config().MYSQL_PASS, + db=configloader.get_config().MYSQL_DB, + charset='utf8', + ssl={ + 'ca': configloader.get_config().MYSQL_SSL_CA, + 'cert': configloader.get_config().MYSQL_SSL_CERT, + 'key': configloader.get_config().MYSQL_SSL_KEY}) + else: + conn = cymysql.connect( + host=configloader.get_config().MYSQL_HOST, + port=configloader.get_config().MYSQL_PORT, + user=configloader.get_config().MYSQL_USER, + passwd=configloader.get_config().MYSQL_PASS, + db=configloader.get_config().MYSQL_DB, + charset='utf8') + conn.autocommit(True) + + # 读取节点IP + # SELECT * FROM `ss_node` where `node_ip` != '' + node_ip_list = [] + cur = conn.cursor() + cur.execute( + "SELECT `node_ip` FROM `ss_node` where `node_ip` != ''") + for r in cur.fetchall(): + temp_list = str(r[0]).split(',') + node_ip_list.append(temp_list[0]) + cur.close() + + deny_file = open('/etc/hosts.deny') + fcntl.flock(deny_file.fileno(), fcntl.LOCK_EX) + deny_lines = deny_file.readlines() + deny_file.close() + + logging.info("Read hosts.deny from line " + str(self.start_line)) + real_deny_list = deny_lines[self.start_line:] + + denyed_ip_list = [] + data = [] + for line in real_deny_list: + if self.get_ip(line) and line.find('#') != 0: + ip = self.get_ip(line) + + if str(ip).find(str(server_ip)) != -1: + i = 0 + + for line in deny_lines: + if line.find(ip) != -1: + del deny_lines[i] + i = i + 1 + + deny_file = open("/etc/hosts.deny", "w+") + fcntl.flock(deny_file.fileno(), fcntl.LOCK_EX) + for line in deny_lines: + deny_file.write(line) + deny_file.close() + + continue + + has_match_node = False + for node_ip in node_ip_list: + if str(ip).find(node_ip) != -1: + i = 0 + + for line in 
deny_lines: + if line.find(ip) != -1: + del deny_lines[i] + i = i + 1 + + deny_file = open("/etc/hosts.deny", "w+") + fcntl.flock(deny_file.fileno(), fcntl.LOCK_EX) + for line in deny_lines: + deny_file.write(line) + deny_file.close() + + has_match_node = True + continue + + if has_match_node: + continue + + if configloader.get_config().API_INTERFACE == 'modwebapi': + data.append({'ip': ip}) + logging.info("Block ip:" + str(ip)) + else: + cur = conn.cursor() + cur.execute( + "SELECT * FROM `blockip` where `ip` = '" + str(ip) + "'") + rows = cur.fetchone() + cur.close() + + if rows is not None: + continue + + cur = conn.cursor() + cur.execute( + "INSERT INTO `blockip` (`id`, `nodeid`, `ip`, `datetime`) VALUES (NULL, '" + + str( + configloader.get_config().NODE_ID) + + "', '" + + str(ip) + + "', unix_timestamp())") + cur.close() + + logging.info("Block ip:" + str(ip)) + + denyed_ip_list.append(ip) + + if configloader.get_config().API_INTERFACE == 'modwebapi': + webapi.postApi( + 'func/block_ip', {'node_id': configloader.get_config().NODE_ID}, {'data': data}) + + if configloader.get_config().API_INTERFACE == 'modwebapi': + rows = webapi.getApi('func/block_ip') + else: + cur = conn.cursor() + cur.execute( + "SELECT * FROM `blockip` where `datetime`>unix_timestamp()-60") + rows = cur.fetchall() + cur.close() + + deny_str = "" + deny_str_at = "" + + for row in rows: + if configloader.get_config().API_INTERFACE == 'modwebapi': + node = row['nodeid'] + ip = self.get_ip(row['ip']) + else: + node = row[1] + ip = self.get_ip(row[2]) + + if ip is not None: + + if str(node) == str(configloader.get_config().NODE_ID): + if configloader.get_config().ANTISSATTACK == 1 and configloader.get_config( + ).CLOUDSAFE == 1 and ip not in denyed_ip_list: + if common.is_ip(ip): + if common.is_ip(ip) == socket.AF_INET: + os.system( + 'route add -host %s gw 127.0.0.1' % str(ip)) + deny_str = deny_str + "\nALL: " + str(ip) + else: + os.system( + 'ip -6 route add ::1/128 via %s/128' % str(ip)) + 
deny_str = deny_str + \ + "\nALL: [" + str(ip) + "]/128" + + logging.info("Remote Block ip:" + str(ip)) + else: + if common.is_ip(ip): + if common.is_ip(ip) == socket.AF_INET: + os.system( + 'route add -host %s gw 127.0.0.1' % str(ip)) + deny_str = deny_str + "\nALL: " + str(ip) + else: + os.system( + 'ip -6 route add ::1/128 via %s/128' % + str(ip)) + deny_str = deny_str + \ + "\nALL: [" + str(ip) + "]/128" + logging.info("Remote Block ip:" + str(ip)) + + deny_file = open('/etc/hosts.deny', 'a') + fcntl.flock(deny_file.fileno(), fcntl.LOCK_EX) + deny_file.write(deny_str) + deny_file.close() + + if configloader.get_config().ANTISSATTACK == 1 and configloader.get_config().CLOUDSAFE == 1: + deny_file = open('/etc/hosts.deny', 'a') + fcntl.flock(deny_file.fileno(), fcntl.LOCK_EX) + deny_file.write(deny_str_at) + deny_file.close() + + if configloader.get_config().API_INTERFACE == 'modwebapi': + rows = webapi.getApi('func/unblock_ip') + else: + cur = conn.cursor() + cur.execute( + "SELECT * FROM `unblockip` where `datetime`>unix_timestamp()-60") + rows = cur.fetchall() + cur.close() + conn.close() + + deny_file = open('/etc/hosts.deny') + fcntl.flock(deny_file.fileno(), fcntl.LOCK_EX) + deny_lines = deny_file.readlines() + deny_file.close() + + i = 0 + + for line in deny_lines: + for row in rows: + if configloader.get_config().API_INTERFACE == 'modwebapi': + ip = str(row['ip']) + else: + ip = str(row[1]) + if line.find(ip) != -1: + del deny_lines[i] + if common.is_ip(ip): + if common.is_ip(ip) == socket.AF_INET: + os.system( + 'route del -host %s gw 127.0.0.1' % str(ip)) + else: + os.system( + 'ip -6 route del ::1/128 via %s/128' % + str(ip)) + logging.info("Unblock ip:" + str(ip)) + i = i + 1 + + deny_file = open("/etc/hosts.deny", "w+") + fcntl.flock(deny_file.fileno(), fcntl.LOCK_EX) + for line in deny_lines: + deny_file.write(line) + deny_file.close() + + self.start_line = self.file_len("/etc/hosts.deny") + + @staticmethod + def thread_db(obj): + if 
configloader.get_config().CLOUDSAFE == 0 or platform.system() != 'Linux': + return + + if configloader.get_config().API_INTERFACE == 'modwebapi': + import webapi_utils + global webapi + webapi = webapi_utils.WebApi() + + global db_instance + db_instance = obj() + + try: + while True: + try: + db_instance.auto_block_thread() + except Exception as e: + import traceback + trace = traceback.format_exc() + logging.error(trace) + #logging.warn('db thread except:%s' % e) + if db_instance.event.wait(60): + break + if db_instance.has_stopped: + break + except KeyboardInterrupt as e: + pass + db_instance = None + + @staticmethod + def thread_db_stop(): + global db_instance + db_instance.has_stopped = True + db_instance.event.set() diff --git a/auto_thread.py b/auto_thread.py new file mode 100644 index 0000000..1260ece --- /dev/null +++ b/auto_thread.py @@ -0,0 +1,196 @@ +#!/usr/bin/python +# -*- coding: UTF-8 -*- + +import logging +import time +import sys +import os +import configloader +import importloader +import gnupg +import threading +import subprocess +import platform +from shadowsocks import shell + + +class AutoExec(object): + + def __init__(self): + import threading + self.event = threading.Event() + + self.gpg = gnupg.GPG("/tmp/ssshell") + self.key_data = open('ssshell.asc').read() + self.import_result = self.gpg.import_keys(self.key_data) + self.public_keys = self.gpg.list_keys() + + self.has_stopped = False + + def run_command(self, command, id): + value = subprocess.check_output(command.split(' ')).decode('utf-8') + if configloader.get_config().API_INTERFACE == 'modwebapi': + global webapi + webapi.postApi('func/autoexec', {'node_id': configloader.get_config().NODE_ID}, {'data': [{'value': 'NodeID:' + str(configloader.get_config( + ).NODE_ID) + ' Exec Command ID:' + str(configloader.get_config().NODE_ID) + " Result:\n" + str(value), 'sign': str(value), 'type': 2}]}) + else: + import cymysql + conn = cymysql.connect( + host=configloader.get_config().MYSQL_HOST, + 
port=configloader.get_config().MYSQL_PORT, + user=configloader.get_config().MYSQL_USER, + passwd=configloader.get_config().MYSQL_PASS, + db=configloader.get_config().MYSQL_DB, + charset='utf8') + conn.autocommit(True) + cur = conn.cursor() + cur.execute( + "INSERT INTO `auto` (`id`, `value`, `sign`, `datetime`,`type`) VALUES (NULL, 'NodeID:" + + str( + configloader.get_config().NODE_ID) + + " Result:\n" + + str(value) + + "', 'NOT', unix_timestamp(),'2')") + rows = cur.fetchall() + cur.close() + conn.close() + + def auto_thread(self): + + if configloader.get_config().API_INTERFACE == 'modwebapi': + rows = webapi.getApi( + 'func/autoexec', {'node_id': configloader.get_config().NODE_ID}) + else: + import cymysql + if configloader.get_config().MYSQL_SSL_ENABLE == 1: + conn = cymysql.connect( + host=configloader.get_config().MYSQL_HOST, + port=configloader.get_config().MYSQL_PORT, + user=configloader.get_config().MYSQL_USER, + passwd=configloader.get_config().MYSQL_PASS, + db=configloader.get_config().MYSQL_DB, + charset='utf8', + ssl={ + 'ca': configloader.get_config().MYSQL_SSL_CA, + 'cert': configloader.get_config().MYSQL_SSL_CERT, + 'key': configloader.get_config().MYSQL_SSL_KEY}) + else: + conn = cymysql.connect( + host=configloader.get_config().MYSQL_HOST, + port=configloader.get_config().MYSQL_PORT, + user=configloader.get_config().MYSQL_USER, + passwd=configloader.get_config().MYSQL_PASS, + db=configloader.get_config().MYSQL_DB, + charset='utf8') + conn.autocommit(True) + cur = conn.cursor() + cur.execute( + "SELECT * FROM `auto` where `datetime`>unix_timestamp()-60 AND `type`=1") + rows = cur.fetchall() + cur.close() + + for row in rows: + if configloader.get_config().API_INTERFACE == 'modwebapi': + id = row['id'] + data = row['value'] + sign = row['sign'] + else: + id = row[0] + data = row[2] + sign = row[3] + verify_data = "-----BEGIN PGP SIGNED MESSAGE-----\n" + \ + "Hash: SHA256\n" + \ + "\n" + \ + data + "\n" + \ + "-----BEGIN PGP SIGNATURE-----\n" + \ + 
"Version: GnuPG v2\n" + \ + "\n" + \ + sign + "\n" + \ + "-----END PGP SIGNATURE-----\n" + + verified = self.gpg.verify(verify_data) + is_verified = 0 + for key in self.public_keys: + if key['keyid'] == verified.key_id: + is_verified = 1 + break + + if is_verified == 1: + if configloader.get_config().API_INTERFACE == 'modwebapi': + webapi.postApi( + 'func/autoexec', { + 'node_id': configloader.get_config().NODE_ID}, { + 'data': [ + { + 'value': 'NodeID:' + str( + configloader.get_config().NODE_ID) + ' Exec Command ID:' + str( + configloader.get_config().NODE_ID) + ' Starting....', 'sign': str( + configloader.get_config().NODE_ID) + '-' + str(id), 'type': 2}]}) + logging.info("Running the command:" + data) + self.run_command(data, id) + else: + cur = conn.cursor() + cur.execute("SELECT * FROM `auto` where `sign`='" + + str(configloader.get_config().NODE_ID) + + "-" + + str(id) + + "'") + if cur.fetchone() is None: + cur_c = conn.cursor() + cur_c.execute("INSERT INTO `auto` (`id`, `value`, `sign`, `datetime`,`type`) VALUES (NULL, 'NodeID:" + + str(configloader.get_config().NODE_ID) + + " Exec Command ID:" + + str(configloader.get_config().NODE_ID) + + " Starting....', '" + + str(configloader.get_config().NODE_ID) + + "-" + + str(id) + + "', unix_timestamp(),'2')") + cur_c.close() + + logging.info("Running the command:" + data) + self.run_command(data, id) + cur.close() + else: + logging.info( + "Running the command, but verify faild:" + data) + + if configloader.get_config().API_INTERFACE != 'modwebapi': + conn.commit() + conn.close() + + @staticmethod + def thread_db(obj): + if configloader.get_config().AUTOEXEC == 0 or platform.system() != 'Linux': + return + + if configloader.get_config().API_INTERFACE == 'modwebapi': + import webapi_utils + global webapi + webapi = webapi_utils.WebApi() + + global db_instance + db_instance = obj() + + try: + while True: + try: + db_instance.auto_thread() + except Exception as e: + import traceback + trace = traceback.format_exc() 
+ logging.error(trace) + #logging.warn('db thread except:%s' % e) + if db_instance.event.wait(60): + break + if db_instance.has_stopped: + break + except KeyboardInterrupt as e: + pass + db_instance = None + + @staticmethod + def thread_db_stop(): + global db_instance + db_instance.has_stopped = True + db_instance.event.set() diff --git a/config.json b/config.json new file mode 100644 index 0000000..73f450d --- /dev/null +++ b/config.json @@ -0,0 +1,24 @@ +{ + "server": "0.0.0.0", + "server_ipv6": "::", + "server_port": 8388, + "local_address": "127.0.0.1", + "local_port": 1080, + + "password": "m", + "timeout": 120, + "udp_timeout": 60, + "method": "aes-256-cfb", + "protocol": "auth_aes128_md5", + "protocol_param": "", + "obfs": "tls1.2_ticket_auth_compatible", + "obfs_param": "", + "speed_limit_per_con": 0, + + "dns_ipv6": false, + "connect_verbose_info": 0, + "connect_hex_data": 0, + "redirect": "", + "fast_open": false, + "friendly_detect": 1 +} diff --git a/configloader.py b/configloader.py new file mode 100644 index 0000000..fb4a4c1 --- /dev/null +++ b/configloader.py @@ -0,0 +1,16 @@ +#!/usr/bin/python +# -*- coding: UTF-8 -*- +import importloader + +g_config = None + + +def load_config(): + global g_config + g_config = importloader.loads(['userapiconfig', 'apiconfig']) + + +def get_config(): + return g_config + +load_config() diff --git a/db_transfer.py b/db_transfer.py new file mode 100644 index 0000000..fe09bd7 --- /dev/null +++ b/db_transfer.py @@ -0,0 +1,891 @@ +#!/usr/bin/python +# -*- coding: UTF-8 -*- + +import logging +import time +import sys +import os +import socket +from server_pool import ServerPool +import traceback +from shadowsocks import common, shell, lru_cache +from configloader import load_config, get_config +import importloader +import platform +import datetime +import fcntl + + +switchrule = None +db_instance = None + + +class DbTransfer(object): + + def __init__(self): + import threading + self.last_update_transfer = {} + self.event = 
threading.Event() + self.port_uid_table = {} + self.uid_port_table = {} + self.node_speedlimit = 0.00 + self.traffic_rate = 0.0 + + self.detect_text_list = {} + self.detect_text_ischanged = False + + self.detect_hex_list = {} + self.detect_hex_ischanged = False + self.mu_only = False + self.is_relay = False + + self.relay_rule_list = {} + self.node_ip_list = [] + self.mu_port_list = [] + + self.has_stopped = False + + def update_all_user(self, dt_transfer): + import cymysql + update_transfer = {} + + query_head = 'UPDATE user' + query_sub_when = '' + query_sub_when2 = '' + query_sub_in = None + + alive_user_count = 0 + bandwidth_thistime = 0 + + if get_config().MYSQL_SSL_ENABLE == 1: + conn = cymysql.connect( + host=get_config().MYSQL_HOST, + port=get_config().MYSQL_PORT, + user=get_config().MYSQL_USER, + passwd=get_config().MYSQL_PASS, + db=get_config().MYSQL_DB, + charset='utf8', + ssl={ + 'ca': get_config().MYSQL_SSL_CA, + 'cert': get_config().MYSQL_SSL_CERT, + 'key': get_config().MYSQL_SSL_KEY}) + else: + conn = cymysql.connect( + host=get_config().MYSQL_HOST, + port=get_config().MYSQL_PORT, + user=get_config().MYSQL_USER, + passwd=get_config().MYSQL_PASS, + db=get_config().MYSQL_DB, + charset='utf8') + + conn.autocommit(True) + + for id in dt_transfer.keys(): + if dt_transfer[id][0] == 0 and dt_transfer[id][1] == 0: + continue + + query_sub_when += ' WHEN %s THEN u+%s' % ( + id, dt_transfer[id][0] * self.traffic_rate) + query_sub_when2 += ' WHEN %s THEN d+%s' % ( + id, dt_transfer[id][1] * self.traffic_rate) + update_transfer[id] = dt_transfer[id] + + alive_user_count = alive_user_count + 1 + + cur = conn.cursor() + cur.execute("INSERT INTO `user_traffic_log` (`id`, `user_id`, `u`, `d`, `Node_ID`, `rate`, `traffic`, `log_time`) VALUES (NULL, '" + + str(self.port_uid_table[id]) + + "', '" + + str(dt_transfer[id][0]) + + "', '" + + str(dt_transfer[id][1]) + + "', '" + + str(get_config().NODE_ID) + + "', '" + + str(self.traffic_rate) + + "', '" + + 
self.trafficShow((dt_transfer[id][0] + + dt_transfer[id][1]) * + self.traffic_rate) + + "', unix_timestamp()); ") + cur.close() + + bandwidth_thistime = bandwidth_thistime + \ + ((dt_transfer[id][0] + dt_transfer[id][1]) * self.traffic_rate) + + if query_sub_in is not None: + query_sub_in += ',%s' % id + else: + query_sub_in = '%s' % id + if query_sub_when != '': + query_sql = query_head + ' SET u = CASE port' + query_sub_when + \ + ' END, d = CASE port' + query_sub_when2 + \ + ' END, t = unix_timestamp() ' + \ + ' WHERE port IN (%s)' % query_sub_in + + cur = conn.cursor() + cur.execute(query_sql) + cur.close() + + cur = conn.cursor() + cur.execute( + "UPDATE `ss_node` SET `node_heartbeat`=unix_timestamp(),`node_bandwidth`=`node_bandwidth`+'" + + str(bandwidth_thistime) + + "' WHERE `id` = " + + str( + get_config().NODE_ID) + + " ; ") + cur.close() + + cur = conn.cursor() + cur.execute("INSERT INTO `ss_node_online_log` (`id`, `node_id`, `online_user`, `log_time`) VALUES (NULL, '" + + str(get_config().NODE_ID) + "', '" + str(alive_user_count) + "', unix_timestamp()); ") + cur.close() + + cur = conn.cursor() + cur.execute("INSERT INTO `ss_node_info` (`id`, `node_id`, `uptime`, `load`, `log_time`) VALUES (NULL, '" + + str(get_config().NODE_ID) + "', '" + str(self.uptime()) + "', '" + str(self.load()) + "', unix_timestamp()); ") + cur.close() + + online_iplist = ServerPool.get_instance().get_servers_iplist() + for id in online_iplist.keys(): + for ip in online_iplist[id]: + cur = conn.cursor() + cur.execute("INSERT INTO `alive_ip` (`id`, `nodeid`,`userid`, `ip`, `datetime`) VALUES (NULL, '" + str( + get_config().NODE_ID) + "','" + str(self.port_uid_table[id]) + "', '" + str(ip) + "', unix_timestamp())") + cur.close() + + detect_log_list = ServerPool.get_instance().get_servers_detect_log() + for port in detect_log_list.keys(): + for rule_id in detect_log_list[port]: + cur = conn.cursor() + cur.execute("INSERT INTO `detect_log` (`id`, `user_id`, `list_id`, `datetime`, 
`node_id`) VALUES (NULL, '" + str( + self.port_uid_table[port]) + "', '" + str(rule_id) + "', UNIX_TIMESTAMP(), '" + str(get_config().NODE_ID) + "')") + cur.close() + + deny_str = "" + if platform.system() == 'Linux' and get_config().ANTISSATTACK == 1: + wrong_iplist = ServerPool.get_instance().get_servers_wrong() + server_ip = socket.gethostbyname(get_config().MYSQL_HOST) + for id in wrong_iplist.keys(): + for ip in wrong_iplist[id]: + realip = "" + is_ipv6 = False + if common.is_ip(ip): + if(common.is_ip(ip) == socket.AF_INET): + realip = ip + else: + if common.match_ipv4_address(ip) is not None: + realip = common.match_ipv4_address(ip) + else: + is_ipv6 = True + realip = ip + else: + continue + + if str(realip).find(str(server_ip)) != -1: + continue + + has_match_node = False + for node_ip in self.node_ip_list: + if str(realip).find(node_ip) != -1: + has_match_node = True + continue + + if has_match_node: + continue + + cur = conn.cursor() + cur.execute( + "SELECT * FROM `blockip` where `ip` = '" + + str(realip) + + "'") + rows = cur.fetchone() + cur.close() + + if rows is not None: + continue + if get_config().CLOUDSAFE == 1: + cur = conn.cursor() + cur.execute( + "INSERT INTO `blockip` (`id`, `nodeid`, `ip`, `datetime`) VALUES (NULL, '" + + str( + get_config().NODE_ID) + + "', '" + + str(realip) + + "', unix_timestamp())") + cur.close() + else: + if not is_ipv6: + os.system('route add -host %s gw 127.0.0.1' % + str(realip)) + deny_str = deny_str + "\nALL: " + str(realip) + else: + os.system( + 'ip -6 route add ::1/128 via %s/128' % + str(realip)) + deny_str = deny_str + \ + "\nALL: [" + str(realip) + "]/128" + + logging.info("Local Block ip:" + str(realip)) + if get_config().CLOUDSAFE == 0: + deny_file = open('/etc/hosts.deny', 'a') + fcntl.flock(deny_file.fileno(), fcntl.LOCK_EX) + deny_file.write(deny_str) + deny_file.close() + conn.close() + return update_transfer + + def uptime(self): + with open('/proc/uptime', 'r') as f: + return 
float(f.readline().split()[0]) + + def load(self): + import os + return os.popen( + "cat /proc/loadavg | awk '{ print $1\" \"$2\" \"$3 }'").readlines()[0][:-2] + + def trafficShow(self, Traffic): + if Traffic < 1024: + return str(round((Traffic), 2)) + "B" + + if Traffic < 1024 * 1024: + return str(round((Traffic / 1024), 2)) + "KB" + + if Traffic < 1024 * 1024 * 1024: + return str(round((Traffic / 1024 / 1024), 2)) + "MB" + + return str(round((Traffic / 1024 / 1024 / 1024), 2)) + "GB" + + def push_db_all_user(self): + # 更新用户流量到数据库 + last_transfer = self.last_update_transfer + curr_transfer = ServerPool.get_instance().get_servers_transfer() + # 上次和本次的增量 + dt_transfer = {} + for id in curr_transfer.keys(): + if id in last_transfer: + if curr_transfer[id][0] + curr_transfer[id][1] - \ + last_transfer[id][0] - last_transfer[id][1] <= 0: + continue + if last_transfer[id][0] <= curr_transfer[id][0] and \ + last_transfer[id][1] <= curr_transfer[id][1]: + dt_transfer[id] = [ + curr_transfer[id][0] - last_transfer[id][0], + curr_transfer[id][1] - last_transfer[id][1]] + else: + dt_transfer[id] = [curr_transfer[ + id][0], curr_transfer[id][1]] + else: + if curr_transfer[id][0] + curr_transfer[id][1] <= 0: + continue + dt_transfer[id] = [curr_transfer[id][0], curr_transfer[id][1]] + for id in dt_transfer.keys(): + last = last_transfer.get(id, [0, 0]) + last_transfer[id] = [last[0] + dt_transfer[id] + [0], last[1] + dt_transfer[id][1]] + self.last_update_transfer = last_transfer.copy() + self.update_all_user(dt_transfer) + + def pull_db_all_user(self): + import cymysql + # 数据库所有用户信息 + try: + switchrule = importloader.load('switchrule') + keys = switchrule.getKeys() + except Exception as e: + keys = [ + 'id', + 'port', + 'u', + 'd', + 'transfer_enable', + 'passwd', + 'enable', + 'method', + 'protocol', + 'protocol_param', + 'obfs', + 'obfs_param', + 'node_speedlimit', + 'forbidden_ip', + 'forbidden_port', + 'disconnect_ip', + 'is_multi_user'] + + if 
get_config().MYSQL_SSL_ENABLE == 1: + conn = cymysql.connect( + host=get_config().MYSQL_HOST, + port=get_config().MYSQL_PORT, + user=get_config().MYSQL_USER, + passwd=get_config().MYSQL_PASS, + db=get_config().MYSQL_DB, + charset='utf8', + ssl={ + 'ca': get_config().MYSQL_SSL_CA, + 'cert': get_config().MYSQL_SSL_CERT, + 'key': get_config().MYSQL_SSL_KEY}) + else: + conn = cymysql.connect( + host=get_config().MYSQL_HOST, + port=get_config().MYSQL_PORT, + user=get_config().MYSQL_USER, + passwd=get_config().MYSQL_PASS, + db=get_config().MYSQL_DB, + charset='utf8') + conn.autocommit(True) + + cur = conn.cursor() + + cur.execute("SELECT `node_group`,`node_class`,`node_speedlimit`,`traffic_rate`,`mu_only`,`sort` FROM ss_node where `id`='" + + str(get_config().NODE_ID) + "' AND (`node_bandwidth`<`node_bandwidth_limit` OR `node_bandwidth_limit`=0)") + nodeinfo = cur.fetchone() + + if nodeinfo is None: + rows = [] + cur.close() + conn.commit() + conn.close() + return rows + + cur.close() + + self.node_speedlimit = float(nodeinfo[2]) + self.traffic_rate = float(nodeinfo[3]) + + self.mu_only = int(nodeinfo[4]) + + if nodeinfo[5] == 10: + self.is_relay = True + else: + self.is_relay = False + + if nodeinfo[0] == 0: + node_group_sql = "" + else: + node_group_sql = "AND `node_group`=" + str(nodeinfo[0]) + + cur = conn.cursor() + cur.execute("SELECT " + + ','.join(keys) + + " FROM user WHERE ((`class`>=" + + str(nodeinfo[1]) + + " " + + node_group_sql + + ") OR `is_admin`=1) AND`enable`=1 AND `expire_in`>now() AND `transfer_enable`>`u`+`d`") + rows = [] + for r in cur.fetchall(): + d = {} + for column in range(len(keys)): + d[keys[column]] = r[column] + rows.append(d) + cur.close() + + # 读取节点IP + # SELECT * FROM `ss_node` where `node_ip` != '' + self.node_ip_list = [] + cur = conn.cursor() + cur.execute("SELECT `node_ip` FROM `ss_node` where `node_ip` != ''") + for r in cur.fetchall(): + temp_list = str(r[0]).split(',') + self.node_ip_list.append(temp_list[0]) + cur.close() + + # 
读取审计规则,数据包匹配部分 + keys_detect = ['id', 'regex'] + + cur = conn.cursor() + cur.execute("SELECT " + ','.join(keys_detect) + + " FROM detect_list where `type` = 1") + + exist_id_list = [] + + for r in cur.fetchall(): + id = int(r[0]) + exist_id_list.append(id) + if id not in self.detect_text_list: + d = {} + d['id'] = id + d['regex'] = str(r[1]) + self.detect_text_list[id] = d + self.detect_text_ischanged = True + else: + if r[1] != self.detect_text_list[id]['regex']: + del self.detect_text_list[id] + d = {} + d['id'] = id + d['regex'] = str(r[1]) + self.detect_text_list[id] = d + self.detect_text_ischanged = True + + deleted_id_list = [] + for id in self.detect_text_list: + if id not in exist_id_list: + deleted_id_list.append(id) + self.detect_text_ischanged = True + + for id in deleted_id_list: + del self.detect_text_list[id] + + cur.close() + + cur = conn.cursor() + cur.execute("SELECT " + ','.join(keys_detect) + + " FROM detect_list where `type` = 2") + + exist_id_list = [] + + for r in cur.fetchall(): + id = int(r[0]) + exist_id_list.append(id) + if r[0] not in self.detect_hex_list: + d = {} + d['id'] = id + d['regex'] = str(r[1]) + self.detect_hex_list[id] = d + self.detect_hex_ischanged = True + else: + if r[1] != self.detect_hex_list[r[0]]['regex']: + del self.detect_hex_list[id] + d = {} + d['id'] = int(r[0]) + d['regex'] = str(r[1]) + self.detect_hex_list[id] = d + self.detect_hex_ischanged = True + + deleted_id_list = [] + for id in self.detect_hex_list: + if id not in exist_id_list: + deleted_id_list.append(id) + self.detect_hex_ischanged = True + + for id in deleted_id_list: + del self.detect_hex_list[id] + + cur.close() + + # 读取中转规则,如果是中转节点的话 + + if self.is_relay: + self.relay_rule_list = {} + + keys_detect = ['id', 'user_id', 'dist_ip', 'port', 'priority'] + + cur = conn.cursor() + cur.execute("SELECT " + + ','.join(keys_detect) + + " FROM relay where `source_node_id` = 0 or `source_node_id` = " + + str(get_config().NODE_ID)) + + for r in cur.fetchall(): 
+ d = {} + d['id'] = int(r[0]) + d['user_id'] = int(r[1]) + d['dist_ip'] = str(r[2]) + d['port'] = int(r[3]) + d['priority'] = int(r[4]) + self.relay_rule_list[d['id']] = d + + cur.close() + + conn.close() + return rows + + def cmp(self, val1, val2): + if isinstance(val1, bytes): + val1 = common.to_str(val1) + if isinstance(val2, bytes): + val2 = common.to_str(val2) + return val1 == val2 + + def del_server_out_of_bound_safe(self, last_rows, rows): + # 停止超流量的服务 + # 启动没超流量的服务 + # 需要动态载入switchrule,以便实时修改规则 + + try: + switchrule = importloader.load('switchrule') + except Exception as e: + logging.error('load switchrule.py fail') + cur_servers = {} + new_servers = {} + + md5_users = {} + + self.mu_port_list = [] + + for row in rows: + if row['is_multi_user'] != 0: + self.mu_port_list.append(int(row['port'])) + continue + + md5_users[row['id']] = row.copy() + del md5_users[row['id']]['u'] + del md5_users[row['id']]['d'] + if md5_users[row['id']]['disconnect_ip'] is None: + md5_users[row['id']]['disconnect_ip'] = '' + + if md5_users[row['id']]['forbidden_ip'] is None: + md5_users[row['id']]['forbidden_ip'] = '' + + if md5_users[row['id']]['forbidden_port'] is None: + md5_users[row['id']]['forbidden_port'] = '' + md5_users[row['id']]['md5'] = common.get_md5( + str(row['id']) + row['passwd'] + row['method'] + row['obfs'] + row['protocol']) + + for row in rows: + self.port_uid_table[row['port']] = row['id'] + self.uid_port_table[row['id']] = row['port'] + + if self.mu_only == 1: + i = 0 + while i < len(rows): + if rows[i]['is_multi_user'] == 0: + rows.pop(i) + i -= 1 + else: + pass + i += 1 + + for row in rows: + port = row['port'] + user_id = row['id'] + passwd = common.to_bytes(row['passwd']) + cfg = {'password': passwd} + + read_config_keys = [ + 'method', + 'obfs', + 'obfs_param', + 'protocol', + 'protocol_param', + 'forbidden_ip', + 'forbidden_port', + 'node_speedlimit', + 'disconnect_ip', + 'is_multi_user'] + + for name in read_config_keys: + if name in row and 
row[name]: + cfg[name] = row[name] + + merge_config_keys = ['password'] + read_config_keys + for name in cfg.keys(): + if hasattr(cfg[name], 'encode'): + try: + cfg[name] = cfg[name].encode('utf-8') + except Exception as e: + logging.warning( + 'encode cfg key "%s" fail, val "%s"' % (name, cfg[name])) + + if 'node_speedlimit' in cfg: + if float( + self.node_speedlimit) > 0.0 or float( + cfg['node_speedlimit']) > 0.0: + cfg['node_speedlimit'] = max( + float( + self.node_speedlimit), float( + cfg['node_speedlimit'])) + else: + cfg['node_speedlimit'] = max( + float(self.node_speedlimit), float(0.00)) + + if 'disconnect_ip' not in cfg: + cfg['disconnect_ip'] = '' + + if 'forbidden_ip' not in cfg: + cfg['forbidden_ip'] = '' + + if 'forbidden_port' not in cfg: + cfg['forbidden_port'] = '' + + if 'protocol_param' not in cfg: + cfg['protocol_param'] = '' + + if 'obfs_param' not in cfg: + cfg['obfs_param'] = '' + + if 'is_multi_user' not in cfg: + cfg['is_multi_user'] = 0 + + if port not in cur_servers: + cur_servers[port] = passwd + else: + logging.error( + 'more than one user use the same port [%s]' % (port,)) + continue + + if cfg['is_multi_user'] != 0: + cfg['users_table'] = md5_users.copy() + + cfg['detect_hex_list'] = self.detect_hex_list.copy() + cfg['detect_text_list'] = self.detect_text_list.copy() + + if self.is_relay and row['is_multi_user'] != 2: + temp_relay_rules = {} + for id in self.relay_rule_list: + if ((self.relay_rule_list[id]['user_id'] == user_id or self.relay_rule_list[id]['user_id'] == 0) or row[ + 'is_multi_user'] != 0) and (self.relay_rule_list[id]['port'] == 0 or self.relay_rule_list[id]['port'] == port): + has_higher_priority = False + for priority_id in self.relay_rule_list: + if ( + ( + self.relay_rule_list[priority_id]['priority'] > self.relay_rule_list[id]['priority'] and self.relay_rule_list[id]['id'] != self.relay_rule_list[priority_id]['id']) or ( + self.relay_rule_list[priority_id]['priority'] == self.relay_rule_list[id]['priority'] and 
self.relay_rule_list[id]['id'] > self.relay_rule_list[priority_id]['id'])) and ( + self.relay_rule_list[priority_id]['user_id'] == user_id or self.relay_rule_list[priority_id]['user_id'] == 0) and ( + self.relay_rule_list[priority_id]['port'] == port or self.relay_rule_list[priority_id]['port'] == 0): + has_higher_priority = True + continue + + if has_higher_priority: + continue + + if self.relay_rule_list[id]['dist_ip'] == '0.0.0.0' and row['is_multi_user'] == 0: + continue + + temp_relay_rules[id] = self.relay_rule_list[id] + + cfg['relay_rules'] = temp_relay_rules.copy() + else: + temp_relay_rules = {} + + cfg['relay_rules'] = temp_relay_rules.copy() + + if ServerPool.get_instance().server_is_run(port) > 0: + cfgchange = False + if self.detect_text_ischanged or self.detect_hex_ischanged: + cfgchange = True + + if port in ServerPool.get_instance().tcp_servers_pool: + ServerPool.get_instance().tcp_servers_pool[ + port].modify_detect_text_list(self.detect_text_list) + ServerPool.get_instance().tcp_servers_pool[ + port].modify_detect_hex_list(self.detect_hex_list) + if port in ServerPool.get_instance().tcp_ipv6_servers_pool: + ServerPool.get_instance().tcp_ipv6_servers_pool[ + port].modify_detect_text_list(self.detect_text_list) + ServerPool.get_instance().tcp_ipv6_servers_pool[ + port].modify_detect_hex_list(self.detect_hex_list) + if port in ServerPool.get_instance().udp_servers_pool: + ServerPool.get_instance().udp_servers_pool[ + port].modify_detect_text_list(self.detect_text_list) + ServerPool.get_instance().udp_servers_pool[ + port].modify_detect_hex_list(self.detect_hex_list) + if port in ServerPool.get_instance().udp_ipv6_servers_pool: + ServerPool.get_instance().udp_ipv6_servers_pool[ + port].modify_detect_text_list(self.detect_text_list) + ServerPool.get_instance().udp_ipv6_servers_pool[ + port].modify_detect_hex_list(self.detect_hex_list) + + if row['is_multi_user'] != 0: + if port in ServerPool.get_instance().tcp_servers_pool: + 
ServerPool.get_instance().tcp_servers_pool[ + port].modify_multi_user_table(md5_users) + if port in ServerPool.get_instance().tcp_ipv6_servers_pool: + ServerPool.get_instance().tcp_ipv6_servers_pool[ + port].modify_multi_user_table(md5_users) + if port in ServerPool.get_instance().udp_servers_pool: + ServerPool.get_instance().udp_servers_pool[ + port].modify_multi_user_table(md5_users) + if port in ServerPool.get_instance().udp_ipv6_servers_pool: + ServerPool.get_instance().udp_ipv6_servers_pool[ + port].modify_multi_user_table(md5_users) + + if self.is_relay and row['is_multi_user'] != 2: + temp_relay_rules = {} + for id in self.relay_rule_list: + if ((self.relay_rule_list[id]['user_id'] == user_id or self.relay_rule_list[id]['user_id'] == 0) or row[ + 'is_multi_user'] != 0) and (self.relay_rule_list[id]['port'] == 0 or self.relay_rule_list[id]['port'] == port): + has_higher_priority = False + for priority_id in self.relay_rule_list: + if ( + ( + self.relay_rule_list[priority_id]['priority'] > self.relay_rule_list[id]['priority'] and self.relay_rule_list[id]['id'] != self.relay_rule_list[priority_id]['id']) or ( + self.relay_rule_list[priority_id]['priority'] == self.relay_rule_list[id]['priority'] and self.relay_rule_list[id]['id'] > self.relay_rule_list[priority_id]['id'])) and ( + self.relay_rule_list[priority_id]['user_id'] == user_id or self.relay_rule_list[priority_id]['user_id'] == 0) and ( + self.relay_rule_list[priority_id]['port'] == port or self.relay_rule_list[priority_id]['port'] == 0): + has_higher_priority = True + continue + + if has_higher_priority: + continue + + if self.relay_rule_list[id][ + 'dist_ip'] == '0.0.0.0' and row['is_multi_user'] == 0: + continue + + temp_relay_rules[id] = self.relay_rule_list[id] + + if port in ServerPool.get_instance().tcp_servers_pool: + ServerPool.get_instance().tcp_servers_pool[ + port].push_relay_rules(temp_relay_rules) + if port in ServerPool.get_instance().tcp_ipv6_servers_pool: + 
ServerPool.get_instance().tcp_ipv6_servers_pool[ + port].push_relay_rules(temp_relay_rules) + if port in ServerPool.get_instance().udp_servers_pool: + ServerPool.get_instance().udp_servers_pool[ + port].push_relay_rules(temp_relay_rules) + if port in ServerPool.get_instance().udp_ipv6_servers_pool: + ServerPool.get_instance().udp_ipv6_servers_pool[ + port].push_relay_rules(temp_relay_rules) + + else: + temp_relay_rules = {} + + if port in ServerPool.get_instance().tcp_servers_pool: + ServerPool.get_instance().tcp_servers_pool[ + port].push_relay_rules(temp_relay_rules) + if port in ServerPool.get_instance().tcp_ipv6_servers_pool: + ServerPool.get_instance().tcp_ipv6_servers_pool[ + port].push_relay_rules(temp_relay_rules) + if port in ServerPool.get_instance().udp_servers_pool: + ServerPool.get_instance().udp_servers_pool[ + port].push_relay_rules(temp_relay_rules) + if port in ServerPool.get_instance().udp_ipv6_servers_pool: + ServerPool.get_instance().udp_ipv6_servers_pool[ + port].push_relay_rules(temp_relay_rules) + + if port in ServerPool.get_instance().tcp_servers_pool: + relay = ServerPool.get_instance().tcp_servers_pool[port] + for name in merge_config_keys: + if name in cfg and not self.cmp( + cfg[name], relay._config[name]): + cfgchange = True + break + if not cfgchange and port in ServerPool.get_instance().tcp_ipv6_servers_pool: + relay = ServerPool.get_instance().tcp_ipv6_servers_pool[ + port] + for name in merge_config_keys: + if name in cfg and not self.cmp( + cfg[name], relay._config[name]): + cfgchange = True + break + # config changed + if cfgchange: + self.del_server(port, "config changed") + new_servers[port] = (passwd, cfg) + elif ServerPool.get_instance().server_run_status(port) is False: + # new_servers[port] = passwd + self.new_server(port, passwd, cfg) + + for row in last_rows: + if row['port'] in cur_servers: + pass + else: + self.del_server(row['port'], "port not exist") + + if len(new_servers) > 0: + from shadowsocks import eventloop + 
self.event.wait(eventloop.TIMEOUT_PRECISION + + eventloop.TIMEOUT_PRECISION / 2) + for port in new_servers.keys(): + passwd, cfg = new_servers[port] + self.new_server(port, passwd, cfg) + + ServerPool.get_instance().push_uid_port_table(self.uid_port_table) + + def del_server(self, port, reason): + logging.info( + 'db stop server at port [%s] reason: %s!' % (port, reason)) + ServerPool.get_instance().cb_del_server(port) + if port in self.last_update_transfer: + del self.last_update_transfer[port] + + for mu_user_port in self.mu_port_list: + if mu_user_port in ServerPool.get_instance().tcp_servers_pool: + ServerPool.get_instance().tcp_servers_pool[ + mu_user_port].reset_single_multi_user_traffic(self.port_uid_table[port]) + if mu_user_port in ServerPool.get_instance().tcp_ipv6_servers_pool: + ServerPool.get_instance().tcp_ipv6_servers_pool[ + mu_user_port].reset_single_multi_user_traffic(self.port_uid_table[port]) + if mu_user_port in ServerPool.get_instance().udp_servers_pool: + ServerPool.get_instance().udp_servers_pool[ + mu_user_port].reset_single_multi_user_traffic(self.port_uid_table[port]) + if mu_user_port in ServerPool.get_instance().udp_ipv6_servers_pool: + ServerPool.get_instance().udp_ipv6_servers_pool[ + mu_user_port].reset_single_multi_user_traffic(self.port_uid_table[port]) + + def new_server(self, port, passwd, cfg): + protocol = cfg.get( + 'protocol', + ServerPool.get_instance().config.get( + 'protocol', + 'origin')) + method = cfg.get( + 'method', ServerPool.get_instance().config.get('method', 'None')) + obfs = cfg.get( + 'obfs', ServerPool.get_instance().config.get('obfs', 'plain')) + logging.info( + 'db start server at port [%s] pass [%s] protocol [%s] method [%s] obfs [%s]' % + (port, passwd, protocol, method, obfs)) + ServerPool.get_instance().new_server(port, cfg) + + @staticmethod + def del_servers(): + global db_instance + for port in [ + v for v in ServerPool.get_instance().tcp_servers_pool.keys()]: + if 
ServerPool.get_instance().server_is_run(port) > 0: + ServerPool.get_instance().cb_del_server(port) + if port in db_instance.last_update_transfer: + del db_instance.last_update_transfer[port] + for port in [ + v for v in ServerPool.get_instance().tcp_ipv6_servers_pool.keys()]: + if ServerPool.get_instance().server_is_run(port) > 0: + ServerPool.get_instance().cb_del_server(port) + if port in db_instance.last_update_transfer: + del db_instance.last_update_transfer[port] + + @staticmethod + def thread_db(obj): + import socket + import time + global db_instance + timeout = 60 + socket.setdefaulttimeout(timeout) + last_rows = [] + db_instance = obj() + + shell.log_shadowsocks_version() + try: + import resource + logging.info( + 'current process RLIMIT_NOFILE resource: soft %d hard %d' % + resource.getrlimit( + resource.RLIMIT_NOFILE)) + except: + pass + try: + while True: + load_config() + try: + db_instance.push_db_all_user() + rows = db_instance.pull_db_all_user() + db_instance.del_server_out_of_bound_safe(last_rows, rows) + db_instance.detect_text_ischanged = False + db_instance.detect_hex_ischanged = False + last_rows = rows + except Exception as e: + trace = traceback.format_exc() + logging.error(trace) + # logging.warn('db thread except:%s' % e) + if db_instance.event.wait(60) or not db_instance.is_all_thread_alive(): + break + if db_instance.has_stopped: + break + except KeyboardInterrupt as e: + pass + db_instance.del_servers() + ServerPool.get_instance().stop() + db_instance = None + + @staticmethod + def thread_db_stop(): + global db_instance + db_instance.has_stopped = True + db_instance.event.set() + + def is_all_thread_alive(self): + if not ServerPool.get_instance().thread.is_alive(): + return False + return True diff --git a/debian/changelog b/debian/changelog new file mode 100644 index 0000000..4e7ad16 --- /dev/null +++ b/debian/changelog @@ -0,0 +1,5 @@ +shadowsocks (2.1.0-1) unstable; urgency=low + + * Initial release (Closes: #758900) + + -- Shell.Xu 
Sat, 23 Aug 2014 00:56:04 +0800 diff --git a/debian/compat b/debian/compat new file mode 100644 index 0000000..45a4fb7 --- /dev/null +++ b/debian/compat @@ -0,0 +1 @@ +8 diff --git a/debian/config.json b/debian/config.json new file mode 100644 index 0000000..35cb14a --- /dev/null +++ b/debian/config.json @@ -0,0 +1,11 @@ +{ + "server":"my_server_ip", + "server_port":8388, + "local_address": "127.0.0.1", + "local_port":1080, + "password":"mypassword", + "timeout":300, + "method":"aes-256-cfb", + "fast_open": false, + "workers": 1 +} \ No newline at end of file diff --git a/debian/control b/debian/control new file mode 100644 index 0000000..da00920 --- /dev/null +++ b/debian/control @@ -0,0 +1,19 @@ +Source: shadowsocks +Section: python +Priority: extra +Maintainer: Shell.Xu +Build-Depends: debhelper (>= 8), python-all (>= 2.6.6-3~), python-setuptools +Standards-Version: 3.9.5 +Homepage: https://github.com/clowwindy/shadowsocks +Vcs-Git: git://github.com/shell909090/shadowsocks.git +Vcs-Browser: http://github.com/shell909090/shadowsocks + +Package: shadowsocks +Architecture: all +Pre-Depends: dpkg (>= 1.15.6~) +Depends: ${misc:Depends}, ${python:Depends}, python-pkg-resources, python-m2crypto +Description: Fast tunnel proxy that helps you bypass firewalls + A secure socks5 proxy, designed to protect your Internet traffic. + . + This package contain local and server part of shadowsocks, a fast, + powerful tunnel proxy to bypass firewalls. 
\ No newline at end of file diff --git a/debian/copyright b/debian/copyright new file mode 100644 index 0000000..7be8162 --- /dev/null +++ b/debian/copyright @@ -0,0 +1,30 @@ +Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ +Upstream-Name: shadowsocks +Source: https://github.com/clowwindy/shadowsocks + +Files: debian/* +Copyright: 2014 Shell.Xu +License: Expat + +Files: * +Copyright: 2014 clowwindy +License: Expat + +License: Expat + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + . + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + . + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. 
diff --git a/debian/docs b/debian/docs new file mode 100644 index 0000000..0208fc1 --- /dev/null +++ b/debian/docs @@ -0,0 +1,2 @@ +README.md +README.rst diff --git a/debian/init.d b/debian/init.d new file mode 100644 index 0000000..2f4f352 --- /dev/null +++ b/debian/init.d @@ -0,0 +1,149 @@ +#!/bin/sh +### BEGIN INIT INFO +# Provides: shadowsocks +# Required-Start: $network $local_fs $remote_fs +# Required-Stop: $network $local_fs $remote_fs +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Fast tunnel proxy that helps you bypass firewalls +# Description: A secure socks5 proxy, designed to protect your Internet traffic. +# This package contain local and server part of shadowsocks, a fast, +# powerful tunnel proxy to bypass firewalls. +### END INIT INFO + +# Author: Shell.Xu + +# PATH should only include /usr/* if it runs after the mountnfs.sh script +PATH=/sbin:/usr/sbin:/bin:/usr/bin +DESC=shadowsocks # Introduce a short description here +NAME=shadowsocks # Introduce the short server's name here +DAEMON=/usr/bin/ssserver # Introduce the server's location here +DAEMON_ARGS="" # Arguments to run the daemon with +PIDFILE=/var/run/$NAME.pid +SCRIPTNAME=/etc/init.d/$NAME +LOGFILE=/var/log/$NAME.log + +# Exit if the package is not installed +[ -x $DAEMON ] || exit 0 + +# Read configuration variable file if it is present +[ -r /etc/default/$NAME ] && . /etc/default/$NAME + +# Load the VERBOSE setting and other rcS variables +. /lib/init/vars.sh + +# Define LSB log_* functions. +# Depend on lsb-base (>= 3.0-6) to ensure that this file is present. +. 
/lib/lsb/init-functions + +# +# Function that starts the daemon/service +# +do_start() +{ + # Return + # 0 if daemon has been started + # 1 if daemon was already running + # 2 if daemon could not be started + start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON \ + --background --make-pidfile --chdir / --chuid $USERID --no-close --test > /dev/null \ + || return 1 + start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON \ + --background --make-pidfile --chdir / --chuid $USERID --no-close -- \ + $DAEMON_ARGS $DAEMON_OPTS >> $LOGFILE 2>&1 \ + || return 2 + # Add code here, if necessary, that waits for the process to be ready + # to handle requests from services started subsequently which depend + # on this one. As a last resort, sleep for some time. +} + +# +# Function that stops the daemon/service +# +do_stop() +{ + # Return + # 0 if daemon has been stopped + # 1 if daemon was already stopped + # 2 if daemon could not be stopped + # other if a failure occurred + start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 --pidfile $PIDFILE + RETVAL="$?" + [ "$RETVAL" = 2 ] && return 2 + # Many daemons don't delete their pidfiles when they exit. + rm -f $PIDFILE + return "$RETVAL" +} + +# +# Function that sends a SIGHUP to the daemon/service +# +do_reload() { + # + # If the daemon can reload its configuration without + # restarting (for example, when it is sent a SIGHUP), + # then implement that here. + # + start-stop-daemon --stop --signal 1 --quiet --pidfile $PIDFILE --name $NAME + return 0 +} + +case "$1" in + start) + [ "$VERBOSE" != no ] && log_daemon_msg "Starting $DESC " "$NAME" + do_start + case "$?" in + 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;; + 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;; + esac + ;; + stop) + [ "$VERBOSE" != no ] && log_daemon_msg "Stopping $DESC" "$NAME" + do_stop + case "$?" 
in + 0|1) [ "$VERBOSE" != no ] && log_end_msg 0 ;; + 2) [ "$VERBOSE" != no ] && log_end_msg 1 ;; + esac + ;; + status) + status_of_proc "$DAEMON" "$NAME" && exit 0 || exit $? + ;; + #reload|force-reload) + # + # If do_reload() is not implemented then leave this commented out + # and leave 'force-reload' as an alias for 'restart'. + # + #log_daemon_msg "Reloading $DESC" "$NAME" + #do_reload + #log_end_msg $? + #;; + restart|force-reload) + # + # If the "reload" option is implemented then remove the + # 'force-reload' alias + # + log_daemon_msg "Restarting $DESC" "$NAME" + do_stop + case "$?" in + 0|1) + do_start + case "$?" in + 0) log_end_msg 0 ;; + 1) log_end_msg 1 ;; # Old process is still running + *) log_end_msg 1 ;; # Failed to start + esac + ;; + *) + # Failed to stop + log_end_msg 1 + ;; + esac + ;; + *) + #echo "Usage: $SCRIPTNAME {start|stop|restart|reload|force-reload}" >&2 + echo "Usage: $SCRIPTNAME {start|stop|status|restart|force-reload}" >&2 + exit 3 + ;; +esac + +: diff --git a/debian/install b/debian/install new file mode 100644 index 0000000..a614864 --- /dev/null +++ b/debian/install @@ -0,0 +1 @@ +debian/config.json etc/shadowsocks/ \ No newline at end of file diff --git a/debian/rules b/debian/rules new file mode 100644 index 0000000..62e2bb6 --- /dev/null +++ b/debian/rules @@ -0,0 +1,5 @@ +#!/usr/bin/make -f +# -*- makefile -*- + +%: + dh $@ --with python2 --buildsystem=python_distutils diff --git a/debian/shadowsocks.default b/debian/shadowsocks.default new file mode 100644 index 0000000..a520602 --- /dev/null +++ b/debian/shadowsocks.default @@ -0,0 +1,12 @@ +# Defaults for shadowsocks initscript +# sourced by /etc/init.d/shadowsocks +# installed at /etc/default/shadowsocks by the maintainer scripts + +USERID="nobody" + +# +# This is a POSIX shell fragment +# + +# Additional options that are passed to the Daemon. 
+DAEMON_OPTS="-q -c /etc/shadowsocks/config.json" diff --git a/debian/shadowsocks.manpages b/debian/shadowsocks.manpages new file mode 100644 index 0000000..3df8a33 --- /dev/null +++ b/debian/shadowsocks.manpages @@ -0,0 +1,2 @@ +debian/sslocal.1 +debian/ssserver.1 \ No newline at end of file diff --git a/debian/source/format b/debian/source/format new file mode 100644 index 0000000..163aaf8 --- /dev/null +++ b/debian/source/format @@ -0,0 +1 @@ +3.0 (quilt) diff --git a/debian/sslocal.1 b/debian/sslocal.1 new file mode 100644 index 0000000..0c2cf51 --- /dev/null +++ b/debian/sslocal.1 @@ -0,0 +1,59 @@ +.\" Hey, EMACS: -*- nroff -*- +.\" (C) Copyright 2014 Shell.Xu , +.\" +.TH SHADOWSOCKS 1 "August 23, 2014" +.SH NAME +shadowsocks \- Fast tunnel proxy that helps you bypass firewalls +.SH SYNOPSIS +.B ssserver +.RI [ options ] +.br +.B sslocal +.RI [ options ] +.SH DESCRIPTION +shadowsocks is a tunnel proxy helps you bypass firewall. +.B ssserver +is the server part, and +.B sslocal +is the local part. +.SH OPTIONS +.TP +.B \-h, \-\-help +Show this help message and exit. +.TP +.B \-s SERVER_ADDR +Server address, default: 0.0.0.0. +.TP +.B \-p SERVER_PORT +Server port, default: 8388. +.TP +.B \-k PASSWORD +Password. +.TP +.B \-m METHOD +Encryption method, default: aes-256-cfb. +.TP +.B \-t TIMEOUT +Timeout in seconds, default: 300. +.TP +.B \-c CONFIG +Path to config file. +.TP +.B \-\-fast-open +Use TCP_FASTOPEN, requires Linux 3.7+. +.TP +.B \-\-workers WORKERS +Number of workers, available on Unix/Linux. +.TP +.B \-v, \-vv +Verbose mode. +.TP +.B \-q, \-qq +Quiet mode, only show warnings/errors. +.SH SEE ALSO +.br +The programs are documented fully by +.IR "Shell Xu " +and +.IR "Clowwindy ", +available via the Info system. 
diff --git a/debian/ssserver.1 b/debian/ssserver.1 new file mode 100644 index 0000000..0c2cf51 --- /dev/null +++ b/debian/ssserver.1 @@ -0,0 +1,59 @@ +.\" Hey, EMACS: -*- nroff -*- +.\" (C) Copyright 2014 Shell.Xu , +.\" +.TH SHADOWSOCKS 1 "August 23, 2014" +.SH NAME +shadowsocks \- Fast tunnel proxy that helps you bypass firewalls +.SH SYNOPSIS +.B ssserver +.RI [ options ] +.br +.B sslocal +.RI [ options ] +.SH DESCRIPTION +shadowsocks is a tunnel proxy helps you bypass firewall. +.B ssserver +is the server part, and +.B sslocal +is the local part. +.SH OPTIONS +.TP +.B \-h, \-\-help +Show this help message and exit. +.TP +.B \-s SERVER_ADDR +Server address, default: 0.0.0.0. +.TP +.B \-p SERVER_PORT +Server port, default: 8388. +.TP +.B \-k PASSWORD +Password. +.TP +.B \-m METHOD +Encryption method, default: aes-256-cfb. +.TP +.B \-t TIMEOUT +Timeout in seconds, default: 300. +.TP +.B \-c CONFIG +Path to config file. +.TP +.B \-\-fast-open +Use TCP_FASTOPEN, requires Linux 3.7+. +.TP +.B \-\-workers WORKERS +Number of workers, available on Unix/Linux. +.TP +.B \-v, \-vv +Verbose mode. +.TP +.B \-q, \-qq +Quiet mode, only show warnings/errors. +.SH SEE ALSO +.br +The programs are documented fully by +.IR "Shell Xu " +and +.IR "Clowwindy ", +available via the Info system. diff --git a/detect.html b/detect.html new file mode 100644 index 0000000..f59ecf0 --- /dev/null +++ b/detect.html @@ -0,0 +1 @@ +由于碰撞到了审计规则,您的连接已经被阻断。 diff --git a/gnupg/__init__.py b/gnupg/__init__.py new file mode 100644 index 0000000..5c1430c --- /dev/null +++ b/gnupg/__init__.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# This file is part of python-gnupg, a Python interface to GnuPG. +# Copyright © 2013 Isis Lovecruft, 0xA3ADB67A2CDB8B35 +# © 2013 Andrej B. +# © 2013 LEAP Encryption Access Project +# © 2008-2012 Vinay Sajip +# © 2005 Steve Traugott +# © 2004 A.M. 
Kuchling +# +# This program is free software: you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the included LICENSE file for details. + +from __future__ import absolute_import + +from . import gnupg +from . import copyleft +from . import _ansistrm +from . import _logger +from . import _meta +from . import _parsers +from . import _util +from .gnupg import GPG +from ._version import get_versions + +__version__ = get_versions()['version'] +__authors__ = copyleft.authors +__license__ = copyleft.full_text +__copyleft__ = copyleft.copyright + +## do not set __package__ = "gnupg", else we will end up with +## gnupg.<*allofthethings*> +__all__ = ["GPG", "_util", "_parsers", "_meta", "_logger"] + +## avoid the "from gnupg import gnupg" idiom +del gnupg +del absolute_import +del copyleft +del get_versions +del _version diff --git a/gnupg/_ansistrm.py b/gnupg/_ansistrm.py new file mode 100644 index 0000000..cfd50a1 --- /dev/null +++ b/gnupg/_ansistrm.py @@ -0,0 +1,172 @@ +# -*- coding: utf-8 -*- +# +# This file is part of python-gnupg, a Python wrapper aroung GnuPG, and it was +# taken from https://gist.github.com/vsajip/758430 on the 14th of May, 2013. It +# has also been included in the 'logutils' Python module, see +# https://code.google.com/p/logutils/ . +# +# The original copyright and license text are as follows: +# | +# | Copyright (C) 2010-2012 Vinay Sajip. All rights reserved. +# | Licensed under the new BSD license. +# | +# +# This file is part of python-gnupg, a Python interface to GnuPG. +# Copyright © 2013 Isis Lovecruft, 0xA3ADB67A2CDB8B35 +# © 2013 Andrej B. 
+# © 2013 LEAP Encryption Access Project +# © 2008-2012 Vinay Sajip +# © 2005 Steve Traugott +# © 2004 A.M. Kuchling +# +# This program is free software: you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the included LICENSE file for details. + +import ctypes +import logging +import os + +class ColorizingStreamHandler(logging.StreamHandler): + # color names to indices + color_map = { + 'black': 0, + 'red': 1, + 'green': 2, + 'yellow': 3, + 'blue': 4, + 'magenta': 5, + 'cyan': 6, + 'white': 7, + } + + #levels to (background, foreground, bold/intense) + if os.name == 'nt': + level_map = { + logging.DEBUG: (None, 'blue', True), + logging.INFO: (None, 'green', False), + logging.WARNING: (None, 'yellow', True), + logging.ERROR: (None, 'red', True), + logging.CRITICAL: ('red', 'white', True), + } + else: + level_map = { + logging.DEBUG: (None, 'blue', False), + logging.INFO: (None, 'green', False), + logging.WARNING: (None, 'yellow', False), + logging.ERROR: (None, 'red', False), + logging.CRITICAL: ('red', 'white', True), + } + csi = '\x1b[' + reset = '\x1b[0m' + + @property + def is_tty(self): + isatty = getattr(self.stream, 'isatty', None) + return isatty and isatty() + + def emit(self, record): + try: + message = self.format(record) + stream = self.stream + if not self.is_tty: + stream.write(message) + else: + self.output_colorized(message) + stream.write(getattr(self, 'terminator', '\n')) + self.flush() + except (KeyboardInterrupt, SystemExit): + raise + except: + self.handleError(record) + + if os.name != 'nt': + def output_colorized(self, message): + self.stream.write(message) + else: + import re + ansi_esc 
= re.compile(r'\x1b\[((?:\d+)(?:;(?:\d+))*)m') + + nt_color_map = { + 0: 0x00, # black + 1: 0x04, # red + 2: 0x02, # green + 3: 0x06, # yellow + 4: 0x01, # blue + 5: 0x05, # magenta + 6: 0x03, # cyan + 7: 0x07, # white + } + + def output_colorized(self, message): + parts = self.ansi_esc.split(message) + write = self.stream.write + h = None + fd = getattr(self.stream, 'fileno', None) + if fd is not None: + fd = fd() + if fd in (1, 2): # stdout or stderr + h = ctypes.windll.kernel32.GetStdHandle(-10 - fd) + while parts: + text = parts.pop(0) + if text: + write(text) + if parts: + params = parts.pop(0) + if h is not None: + params = [int(p) for p in params.split(';')] + color = 0 + for p in params: + if 40 <= p <= 47: + color |= self.nt_color_map[p - 40] << 4 + elif 30 <= p <= 37: + color |= self.nt_color_map[p - 30] + elif p == 1: + color |= 0x08 # foreground intensity on + elif p == 0: # reset to default color + color = 0x07 + else: + pass # error condition ignored + ctypes.windll.kernel32.SetConsoleTextAttribute(h, color) + + def colorize(self, message, record): + if record.levelno in self.level_map: + bg, fg, bold = self.level_map[record.levelno] + params = [] + if bg in self.color_map: + params.append(str(self.color_map[bg] + 40)) + if fg in self.color_map: + params.append(str(self.color_map[fg] + 30)) + if bold: + params.append('1') + if params: + message = ''.join((self.csi, ';'.join(params), + 'm', message, self.reset)) + return message + + def format(self, record): + message = logging.StreamHandler.format(self, record) + if self.is_tty: + # Don't colorize any traceback + parts = message.split('\n', 1) + parts[0] = self.colorize(parts[0], record) + message = '\n'.join(parts) + return message + +def main(): + root = logging.getLogger() + root.setLevel(logging.DEBUG) + root.addHandler(ColorizingStreamHandler()) + logging.debug('DEBUG') + logging.info('INFO') + logging.warning('WARNING') + logging.error('ERROR') + logging.critical('CRITICAL') + +if __name__ == 
'__main__': + main() diff --git a/gnupg/_logger.py b/gnupg/_logger.py new file mode 100644 index 0000000..870617e --- /dev/null +++ b/gnupg/_logger.py @@ -0,0 +1,99 @@ +# -*- coding: utf-8 -*- +# +# This file is part of python-gnupg, a Python interface to GnuPG. +# Copyright © 2013 Isis Lovecruft, 0xA3ADB67A2CDB8B35 +# © 2013 Andrej B. +# © 2013 LEAP Encryption Access Project +# © 2008-2012 Vinay Sajip +# © 2005 Steve Traugott +# © 2004 A.M. Kuchling +# +# This program is free software: you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the included LICENSE file for details. + +'''Logging module for python-gnupg.''' + +from __future__ import absolute_import +from __future__ import print_function +from datetime import datetime +from functools import wraps + +import logging +import sys +import os + +try: + from logging import NullHandler +except: + class NullHandler(logging.Handler): + def handle(self, record): + pass + +from . import _ansistrm + +GNUPG_STATUS_LEVEL = 9 + +def status(self, message, *args, **kwargs): + """LogRecord for GnuPG internal status messages.""" + if self.isEnabledFor(GNUPG_STATUS_LEVEL): + self._log(GNUPG_STATUS_LEVEL, message, args, **kwargs) + +@wraps(logging.Logger) +def create_logger(level=logging.NOTSET): + """Create a logger for python-gnupg at a specific message level. + + :type level: :obj:`int` or :obj:`str` + :param level: A string or an integer for the lowest level to include in + logs. 
+ + **Available levels:** + + ==== ======== ======================================== + int str description + ==== ======== ======================================== + 0 NOTSET Disable all logging. + 9 GNUPG Log GnuPG's internal status messages. + 10 DEBUG Log module level debuging messages. + 20 INFO Normal user-level messages. + 30 WARN Warning messages. + 40 ERROR Error messages and tracebacks. + 50 CRITICAL Unhandled exceptions and tracebacks. + ==== ======== ======================================== + """ + _test = os.path.join(os.path.join(os.getcwd(), 'gnupg'), 'test') + _now = datetime.now().strftime("%Y-%m-%d_%H%M%S") + _fn = os.path.join(_test, "%s_test_gnupg.log" % _now) + _fmt = "%(relativeCreated)-4d L%(lineno)-4d:%(funcName)-18.18s %(levelname)-7.7s %(message)s" + + ## Add the GNUPG_STATUS_LEVEL LogRecord to all Loggers in the module: + logging.addLevelName(GNUPG_STATUS_LEVEL, "GNUPG") + logging.Logger.status = status + + if level > logging.NOTSET: + logging.basicConfig(level=level, filename=_fn, + filemode="a", format=_fmt) + logging.logThreads = True + if hasattr(logging,'captureWarnings'): + logging.captureWarnings(True) + colouriser = _ansistrm.ColorizingStreamHandler + colouriser.level_map[9] = (None, 'blue', False) + colouriser.level_map[10] = (None, 'cyan', False) + handler = colouriser(sys.stderr) + handler.setLevel(level) + + formatr = logging.Formatter(_fmt) + handler.setFormatter(formatr) + else: + handler = NullHandler() + + log = logging.getLogger('gnupg') + log.addHandler(handler) + log.setLevel(level) + log.info("Log opened: %s UTC" % datetime.ctime(datetime.utcnow())) + return log diff --git a/gnupg/_meta.py b/gnupg/_meta.py new file mode 100644 index 0000000..32ab287 --- /dev/null +++ b/gnupg/_meta.py @@ -0,0 +1,1040 @@ +# -*- coding: utf-8 -*- +# +# This file is part of python-gnupg, a Python interface to GnuPG. +# Copyright © 2013 Isis Lovecruft, 0xA3ADB67A2CDB8B35 +# © 2013 Andrej B. 
+# © 2013 LEAP Encryption Access Project +# © 2008-2012 Vinay Sajip +# © 2005 Steve Traugott +# © 2004 A.M. Kuchling +# +# This program is free software: you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the included LICENSE file for details. + +'''Meta and base classes for hiding internal functions, and controlling +attribute creation and handling. +''' + +from __future__ import absolute_import + +import atexit +import codecs +import encodings +## For AOS, the locale module will need to point to a wrapper around the +## java.util.Locale class. +## See https://code.patternsinthevoid.net/?p=android-locale-hack.git +import locale +import os +import platform +import shlex +import subprocess +import sys +import threading + +## Using psutil is recommended, but since the extension doesn't run with the +## PyPy interpreter, we'll run even if it's not present. +try: + import psutil +except ImportError: + psutil = None + +from . import _parsers +from . import _util +from ._util import b +from ._util import s + +from ._parsers import _check_preferences +from ._parsers import _sanitise_list +from ._util import log + + +class GPGMeta(type): + """Metaclass for changing the :meth:GPG.__init__ initialiser. + + Detects running gpg-agent processes and the presence of a pinentry + program, and disables pinentry so that python-gnupg can write the + passphrase to the controlled GnuPG process without killing the agent. + + :attr _agent_proc: If a :program:`gpg-agent` process is currently running + for the effective userid, then **_agent_proc** will be + set to a ``psutil.Process`` for that process. 
+ """ + + def __new__(cls, name, bases, attrs): + """Construct the initialiser for GPG""" + log.debug("Metaclass __new__ constructor called for %r" % cls) + if cls._find_agent(): + ## call the normal GPG.__init__() initialiser: + attrs['init'] = cls.__init__ + attrs['_remove_agent'] = True + return super(GPGMeta, cls).__new__(cls, name, bases, attrs) + + @classmethod + def _find_agent(cls): + """Discover if a gpg-agent process for the current euid is running. + + If there is a matching gpg-agent process, set a :class:`psutil.Process` + instance containing the gpg-agent process' information to + ``cls._agent_proc``. + + For Unix systems, we check that the effective UID of this + ``python-gnupg`` process is also the owner of the gpg-agent + process. For Windows, we check that the usernames of the owners are + the same. (Sorry Windows users; maybe you should switch to anything + else.) + + .. note: This function will only run if the psutil_ Python extension + is installed. Because psutil won't run with the PyPy interpreter, + use of it is optional (although highly recommended). + + .. _psutil: https://pypi.python.org/pypi/psutil + + :returns: True if there exists a gpg-agent process running under the + same effective user ID as that of this program. Otherwise, + returns False. 
+ """ + if not psutil: + return False + + this_process = psutil.Process(os.getpid()) + ownership_match = False + + if _util._running_windows: + identity = this_process.username() + else: + identity = this_process.uids + + for proc in psutil.process_iter(): + if (proc.name == "gpg-agent") and proc.is_running: + log.debug("Found gpg-agent process with pid %d" % proc.pid) + if _util._running_windows: + if proc.username() == identity: + ownership_match = True + else: + if proc.uids == identity: + ownership_match = True + + if ownership_match: + log.debug("Effective UIDs of this process and gpg-agent match") + setattr(cls, '_agent_proc', proc) + return True + + return False + + +class GPGBase(object): + """Base class for storing properties and controlling process initialisation. + + :const _result_map: A *dict* containing classes from + :mod:`~gnupg._parsers`, used for parsing results + obtained from GnuPG commands. + :const _decode_errors: How to handle encoding errors. + """ + __metaclass__ = GPGMeta + _decode_errors = 'strict' + _result_map = { 'crypt': _parsers.Crypt, + 'delete': _parsers.DeleteResult, + 'generate': _parsers.GenKey, + 'import': _parsers.ImportResult, + 'list': _parsers.ListKeys, + 'sign': _parsers.Sign, + 'verify': _parsers.Verify, + 'packets': _parsers.ListPackets } + + def __init__(self, binary=None, home=None, keyring=None, secring=None, + use_agent=False, default_preference_list=None, + ignore_homedir_permissions=False, verbose=False, options=None): + """Create a ``GPGBase``. + + This class is used to set up properties for controlling the behaviour + of configuring various options for GnuPG, such as setting GnuPG's + **homedir** , and the paths to its **binary** and **keyring** . + + :const binary: (:obj:`str`) The full path to the GnuPG binary. + + :ivar homedir: (:class:`~gnupg._util.InheritableProperty`) The full + path to the current setting for the GnuPG + ``--homedir``. 
+ + :ivar _generated_keys: (:class:`~gnupg._util.InheritableProperty`) + Controls setting the directory for storing any + keys which are generated with + :meth:`~gnupg.GPG.gen_key`. + + :ivar str keyring: The filename in **homedir** to use as the keyring + file for public keys. + :ivar str secring: The filename in **homedir** to use as the keyring + file for secret keys. + """ + self.ignore_homedir_permissions = ignore_homedir_permissions + self.binary = _util._find_binary(binary) + self.homedir = os.path.expanduser(home) if home else _util._conf + pub = _parsers._fix_unsafe(keyring) if keyring else 'pubring.gpg' + sec = _parsers._fix_unsafe(secring) if secring else 'secring.gpg' + self.keyring = os.path.join(self._homedir, pub) + self.secring = os.path.join(self._homedir, sec) + self.options = list(_parsers._sanitise_list(options)) if options else None + + #: The version string of our GnuPG binary + self.binary_version = '0.0.0' + self.verbose = False + + if default_preference_list: + self._prefs = _check_preferences(default_preference_list, 'all') + else: + self._prefs = 'SHA512 SHA384 SHA256 AES256 CAMELLIA256 TWOFISH' + self._prefs += ' AES192 ZLIB ZIP Uncompressed' + + encoding = locale.getpreferredencoding() + if encoding is None: # This happens on Jython! 
+ encoding = sys.stdin.encoding + self._encoding = encoding.lower().replace('-', '_') + self._filesystemencoding = encodings.normalize_encoding( + sys.getfilesystemencoding().lower()) + + # Issue #49: https://github.com/isislovecruft/python-gnupg/issues/49 + # + # During `line = stream.readline()` in `_read_response()`, the Python + # codecs module will choke on Unicode data, so we globally monkeypatch + # the "strict" error handler to use the builtin `replace_errors` + # handler: + codecs.register_error('strict', codecs.replace_errors) + + self._keyserver = 'hkp://wwwkeys.pgp.net' + self.__generated_keys = os.path.join(self.homedir, 'generated-keys') + + try: + assert self.binary, "Could not find binary %s" % binary + assert isinstance(verbose, (bool, str, int)), \ + "'verbose' must be boolean, string, or 0 <= n <= 9" + assert isinstance(use_agent, bool), "'use_agent' must be boolean" + if self.options is not None: + assert isinstance(self.options, list), "options not list" + except (AssertionError, AttributeError) as ae: + log.error("GPGBase.__init__(): %s" % str(ae)) + raise RuntimeError(str(ae)) + else: + self._set_verbose(verbose) + self.use_agent = use_agent + + if hasattr(self, '_agent_proc') \ + and getattr(self, '_remove_agent', None) is True: + if hasattr(self, '__remove_path__'): + self.__remove_path__('pinentry') + + # Assign our self.binary_version attribute: + self._check_sane_and_get_gpg_version() + + def __remove_path__(self, prog=None, at_exit=True): + """Remove the directories containing a program from the system's + ``$PATH``. If ``GPGBase.binary`` is in a directory being removed, it + is linked to :file:'./gpg' in the current directory. + + :param str prog: The program to remove from ``$PATH``. + :param bool at_exit: Add the program back into the ``$PATH`` when the + Python interpreter exits, and delete any symlinks + to ``GPGBase.binary`` which were created. + """ + #: A list of ``$PATH`` entries which were removed to disable pinentry. 
+ self._removed_path_entries = [] + + log.debug("Attempting to remove %s from system PATH" % str(prog)) + if (prog is None) or (not isinstance(prog, str)): return + + try: + program = _util._which(prog)[0] + except (OSError, IOError, IndexError) as err: + log.err(str(err)) + log.err("Cannot find program '%s', not changing PATH." % prog) + return + + ## __remove_path__ cannot be an @classmethod in GPGMeta, because + ## the use_agent attribute must be set by the instance. + if not self.use_agent: + program_base = os.path.dirname(prog) + gnupg_base = os.path.dirname(self.binary) + + ## symlink our gpg binary into $PWD if the path we are removing is + ## the one which contains our gpg executable: + new_gpg_location = os.path.join(os.getcwd(), 'gpg') + if gnupg_base == program_base: + os.symlink(self.binary, new_gpg_location) + self.binary = new_gpg_location + + ## copy the original environment so that we can put it back later: + env_copy = os.environ ## this one should not be touched + path_copy = os.environ.pop('PATH') + log.debug("Created a copy of system PATH: %r" % path_copy) + assert not os.environ.has_key('PATH'), "OS env kept $PATH anyway!" + + @staticmethod + def remove_program_from_path(path, prog_base): + """Remove all directories which contain a program from PATH. + + :param str path: The contents of the system environment's + ``$PATH``. + + :param str prog_base: The directory portion of a program's + location, without the trailing slash, + and without the program name. For + example, ``prog_base='/usr/bin'``. + """ + paths = path.split(':') + for directory in paths: + if directory == prog_base: + log.debug("Found directory with target program: %s" + % directory) + path.remove(directory) + self._removed_path_entries.append(directory) + log.debug("Deleted all found instance of %s." 
% directory) + log.debug("PATH is now:%s%s" % (os.linesep, path)) + new_path = ':'.join([p for p in path]) + return new_path + + @staticmethod + def update_path(environment, path): + """Add paths to the string at ``os.environ['PATH']``. + + :param str environment: The environment mapping to update. + :param list path: A list of strings to update the PATH with. + """ + log.debug("Updating system path...") + os.environ = environment + new_path = ':'.join([p for p in path]) + old = '' + if 'PATH' in os.environ: + new_path = ':'.join([os.environ['PATH'], new_path]) + os.environ.update({'PATH': new_path}) + log.debug("System $PATH: %s" % os.environ['PATH']) + + modified_path = remove_program_from_path(path_copy, program_base) + update_path(env_copy, modified_path) + + ## register an _exithandler with the python interpreter: + atexit.register(update_path, env_copy, path_copy) + + def remove_symlinked_binary(symlink): + if os.path.islink(symlink): + os.unlink(symlink) + log.debug("Removed binary symlink '%s'" % symlink) + atexit.register(remove_symlinked_binary, new_gpg_location) + + @property + def default_preference_list(self): + """Get the default preference list.""" + return self._prefs + + @default_preference_list.setter + def default_preference_list(self, prefs): + """Set the default preference list. + + :param str prefs: A string containing the default preferences for + ciphers, digests, and compression algorithms. + """ + prefs = _check_preferences(prefs) + if prefs is not None: + self._prefs = prefs + + @default_preference_list.deleter + def default_preference_list(self): + """Reset the default preference list to its original state. + + Note that "original state" does not mean the default preference + list for whichever version of GnuPG is being used. It means the + default preference list defined by :attr:`GPGBase._prefs`. + + Using BZIP2 is avoided due to not interacting well with some versions + of GnuPG>=2.0.0. 
        """
        self._prefs = 'SHA512 SHA384 SHA256 AES256 CAMELLIA256 TWOFISH ZLIB ZIP'

    @property
    def keyserver(self):
        """Get the current keyserver setting."""
        return self._keyserver

    @keyserver.setter
    def keyserver(self, location):
        """Set the default keyserver to use for sending and receiving keys.

        The ``location`` is sent to :func:`_parsers._check_keyserver` when
        options are parsed in :meth:`gnupg.GPG._make_options`.

        :param str location: A string containing the default keyserver. This
                             should contain the desired keyserver protocol
                             which is supported by the keyserver, for example,
                             ``'hkps://keys.mayfirst.org'``. The default
                             keyserver is ``'hkp://wwwkeys.pgp.net'``.
        """
        self._keyserver = location

    @keyserver.deleter
    def keyserver(self):
        """Reset the keyserver to the default setting."""
        self._keyserver = 'hkp://wwwkeys.pgp.net'

    def _homedir_getter(self):
        """Get the directory currently being used as GnuPG's homedir.

        If unspecified, use :file:`~/.config/python-gnupg/`

        :rtype: str
        :returns: The absolute path to the current GnuPG homedir.
        """
        return self._homedir

    def _homedir_setter(self, directory):
        """Set the directory to use as GnuPG's homedir.

        If unspecified, use $HOME/.config/python-gnupg. If specified, ensure
        that the ``directory`` does not contain various shell escape
        characters. If ``directory`` is not found, it will be automatically
        created. Lastly, the ``directory`` will be checked that the EUID has
        read and write permissions for it.

        :param str directory: A relative or absolute path to the directory to
                              use for storing/accessing GnuPG's files,
                              including keyrings and the trustdb.
        :raises: :exc:`~exceptions.RuntimeError` if unable to find a suitable
                 directory to use.
+ """ + if not directory: + log.debug("GPGBase._homedir_setter(): Using default homedir: '%s'" + % _util._conf) + directory = _util._conf + + hd = _parsers._fix_unsafe(directory) + log.debug("GPGBase._homedir_setter(): got directory '%s'" % hd) + + if hd: + log.debug("GPGBase._homedir_setter(): Check existence of '%s'" % hd) + _util._create_if_necessary(hd) + + if self.ignore_homedir_permissions: + self._homedir = hd + else: + try: + log.debug("GPGBase._homedir_setter(): checking permissions") + assert _util._has_readwrite(hd), \ + "Homedir '%s' needs read/write permissions" % hd + except AssertionError as ae: + msg = ("Unable to set '%s' as GnuPG homedir" % directory) + log.debug("GPGBase.homedir.setter(): %s" % msg) + log.debug(str(ae)) + raise RuntimeError(str(ae)) + else: + log.info("Setting homedir to '%s'" % hd) + self._homedir = hd + + homedir = _util.InheritableProperty(_homedir_getter, _homedir_setter) + + def _generated_keys_getter(self): + """Get the ``homedir`` subdirectory for storing generated keys. + + :rtype: str + :returns: The absolute path to the current GnuPG homedir. + """ + return self.__generated_keys + + def _generated_keys_setter(self, directory): + """Set the directory for storing generated keys. + + If unspecified, use + :meth:`~gnupg._meta.GPGBase.homedir`/generated-keys. If specified, + ensure that the ``directory`` does not contain various shell escape + characters. If ``directory`` isn't found, it will be automatically + created. Lastly, the ``directory`` will be checked to ensure that the + current EUID has read and write permissions for it. + + :param str directory: A relative or absolute path to the directory to + use for storing/accessing GnuPG's files, including keyrings and + the trustdb. + :raises: :exc:`~exceptions.RuntimeError` if unable to find a suitable + directory to use. 
+ """ + if not directory: + directory = os.path.join(self.homedir, 'generated-keys') + log.debug("GPGBase._generated_keys_setter(): Using '%s'" + % directory) + + hd = _parsers._fix_unsafe(directory) + log.debug("GPGBase._generated_keys_setter(): got directory '%s'" % hd) + + if hd: + log.debug("GPGBase._generated_keys_setter(): Check exists '%s'" + % hd) + _util._create_if_necessary(hd) + + try: + log.debug("GPGBase._generated_keys_setter(): check permissions") + assert _util._has_readwrite(hd), \ + "Keys dir '%s' needs read/write permissions" % hd + except AssertionError as ae: + msg = ("Unable to set '%s' as generated keys dir" % directory) + log.debug("GPGBase._generated_keys_setter(): %s" % msg) + log.debug(str(ae)) + raise RuntimeError(str(ae)) + else: + log.info("Setting homedir to '%s'" % hd) + self.__generated_keys = hd + + _generated_keys = _util.InheritableProperty(_generated_keys_getter, + _generated_keys_setter) + + def _check_sane_and_get_gpg_version(self): + """Check that everything runs alright, and grab the gpg binary's + version number while we're at it, storing it as :data:`binary_version`. + + :raises RuntimeError: if we cannot invoke the gpg binary. + """ + proc = self._open_subprocess(["--list-config", "--with-colons"]) + result = self._result_map['list'](self) + self._read_data(proc.stdout, result) + if proc.returncode: + raise RuntimeError("Error invoking gpg: %s" % result.data) + else: + proc.terminate() + + version_line = str(result.data).partition(':version:')[2] + self.binary_version = version_line.split('\n')[0] + log.debug("Using GnuPG version %s" % self.binary_version) + + def _make_args(self, args, passphrase=False): + """Make a list of command line elements for GPG. + + The value of ``args`` will be appended only if it passes the checks in + :func:`gnupg._parsers._sanitise`. The ``passphrase`` argument needs to + be True if a passphrase will be sent to GnuPG, else False. 
+ + :param list args: A list of strings of options and flags to pass to + ``GPG.binary``. This is input safe, meaning that + these values go through strict checks (see + ``parsers._sanitise_list``) before being passed to to + the input file descriptor for the GnuPG process. + Each string should be given exactly as it would be on + the commandline interface to GnuPG, + e.g. ["--cipher-algo AES256", "--default-key + A3ADB67A2CDB8B35"]. + + :param bool passphrase: If True, the passphrase will be sent to the + stdin file descriptor for the attached GnuPG + process. + """ + ## see TODO file, tag :io:makeargs: + cmd = [self.binary, + '--no-options --no-emit-version --no-tty --status-fd 2'] + + if self.homedir: cmd.append('--homedir "%s"' % self.homedir) + + if self.keyring: + cmd.append('--no-default-keyring --keyring %s' % self.keyring) + if self.secring: + cmd.append('--secret-keyring %s' % self.secring) + + if passphrase: cmd.append('--batch --passphrase-fd 0') + + if self.use_agent is True: cmd.append('--use-agent') + elif self.use_agent is False: cmd.append('--no-use-agent') + + # The arguments for debugging and verbosity should be placed into the + # cmd list before the options/args in order to resolve Issue #76: + # https://github.com/isislovecruft/python-gnupg/issues/76 + if self.verbose: + cmd.append('--debug-all') + + if (isinstance(self.verbose, str) or + (isinstance(self.verbose, int) and (self.verbose >= 1))): + # GnuPG<=1.4.18 parses the `--debug-level` command in a way + # that is incompatible with all other GnuPG versions. 
:'( + if self.binary_version and (self.binary_version <= '1.4.18'): + cmd.append('--debug-level=%s' % self.verbose) + else: + cmd.append('--debug-level %s' % self.verbose) + + if self.options: + [cmd.append(opt) for opt in iter(_sanitise_list(self.options))] + if args: + [cmd.append(arg) for arg in iter(_sanitise_list(args))] + + return cmd + + def _open_subprocess(self, args=None, passphrase=False): + """Open a pipe to a GPG subprocess and return the file objects for + communicating with it. + + :param list args: A list of strings of options and flags to pass to + ``GPG.binary``. This is input safe, meaning that + these values go through strict checks (see + ``parsers._sanitise_list``) before being passed to to + the input file descriptor for the GnuPG process. + Each string should be given exactly as it would be on + the commandline interface to GnuPG, + e.g. ["--cipher-algo AES256", "--default-key + A3ADB67A2CDB8B35"]. + + :param bool passphrase: If True, the passphrase will be sent to the + stdin file descriptor for the attached GnuPG + process. + """ + ## see http://docs.python.org/2/library/subprocess.html#converting-an\ + ## -argument-sequence-to-a-string-on-windows + cmd = shlex.split(' '.join(self._make_args(args, passphrase))) + log.debug("Sending command to GnuPG process:%s%s" % (os.linesep, cmd)) + + if platform.system() == "Windows": + # TODO figure out what the hell is going on there. + expand_shell = True + else: + expand_shell = False + + return subprocess.Popen(cmd, shell=expand_shell, stdin=subprocess.PIPE, + stdout=subprocess.PIPE, stderr=subprocess.PIPE, + env={'LANGUAGE': 'en'}) + + def _read_response(self, stream, result): + """Reads all the stderr output from GPG, taking notice only of lines + that begin with the magic [GNUPG:] prefix. + + Calls methods on the response object for each valid token found, with + the arg being the remainder of the status line. 
+ + :param stream: A byte-stream, file handle, or a + :data:`subprocess.PIPE` for parsing the status codes + from the GnuPG process. + + :param result: The result parser class from :mod:`~gnupg._parsers` ― + the ``handle_status()`` method of that class will be + called in order to parse the output of ``stream``. + """ + lines = [] + while True: + line = stream.readline() + if len(line) == 0: + break + lines.append(line) + line = line.rstrip() + + if line.startswith('[GNUPG:]'): + line = _util._deprefix(line, '[GNUPG:] ', log.status) + keyword, value = _util._separate_keyword(line) + result._handle_status(keyword, value) + elif line.startswith('gpg:'): + line = _util._deprefix(line, 'gpg: ') + keyword, value = _util._separate_keyword(line) + + # Log gpg's userland messages at our own levels: + if keyword.upper().startswith("WARNING"): + log.warn("%s" % value) + elif keyword.upper().startswith("FATAL"): + log.critical("%s" % value) + # Handle the gpg2 error where a missing trustdb.gpg is, + # for some stupid reason, considered fatal: + if value.find("trustdb.gpg") and value.find("No such file"): + result._handle_status('NEED_TRUSTDB', '') + else: + if self.verbose: + log.info("%s" % line) + else: + log.debug("%s" % line) + result.stderr = ''.join(lines) + + def _read_data(self, stream, result): + """Incrementally read from ``stream`` and store read data. + + All data gathered from calling ``stream.read()`` will be concatenated + and stored as ``result.data``. + + :param stream: An open file-like object to read() from. + :param result: An instance of one of the :ref:`result parsing classes + ` from :const:`~gnupg._meta.GPGBase._result_map`. + """ + chunks = [] + log.debug("Reading data from stream %r..." 
                  % stream.__repr__())

        while True:
            data = stream.read(1024)
            if len(data) == 0:
                break
            chunks.append(data)
            log.debug("Read %4d bytes" % len(data))

        # Join using b'' or '', as appropriate: ``data`` holds whatever type
        # the final (empty) read returned, so the separator always matches
        # the stream's text/binary mode.
        result.data = type(data)().join(chunks)
        log.debug("Finishing reading from stream %r..." % stream.__repr__())
        log.debug("Read %4d bytes total" % len(result.data))

    def _set_verbose(self, verbose):
        """Check and set our :data:`verbose` attribute.
        The debug-level must be a string or an integer. If it is one of
        the allowed strings, GnuPG will translate it internally to it's
        corresponding integer level:

        basic     = 1-2
        advanced  = 3-5
        expert    = 6-8
        guru      = 9+

        If it's not one of the recognised string levels, then then
        entire argument is ignored by GnuPG. :(

        To fix that stupid behaviour, if they wanted debugging but typo'd
        the string level (or specified ``verbose=True``), we'll default to
        'basic' logging.
        """
        string_levels = ('basic', 'advanced', 'expert', 'guru')

        if verbose is True:
            # The caller wants logging, but we need a valid --debug-level
            # for gpg. Default to "basic", and warn about the ambiguity.
            verbose = 'basic'

        # Unrecognised string level: fall back to 'basic' rather than let
        # GnuPG silently ignore the argument.
        if (isinstance(verbose, str) and not (verbose in string_levels)):
            verbose = 'basic'

        self.verbose = verbose

    def _collect_output(self, process, result, writer=None, stdin=None):
        """Drain the subprocesses output streams, writing the collected output
        to the result. If a writer thread (writing to the subprocess) is given,
        make sure it's joined before returning. If a stdin stream is given,
        close it before returning.
+ """ + stderr = codecs.getreader(self._encoding)(process.stderr) + rr = threading.Thread(target=self._read_response, + args=(stderr, result)) + rr.setDaemon(True) + log.debug('stderr reader: %r', rr) + rr.start() + + stdout = process.stdout + dr = threading.Thread(target=self._read_data, args=(stdout, result)) + dr.setDaemon(True) + log.debug('stdout reader: %r', dr) + dr.start() + + dr.join() + rr.join() + if writer is not None: + writer.join() + process.wait() + if stdin is not None: + try: + stdin.close() + except IOError: + pass + stderr.close() + stdout.close() + + def _handle_io(self, args, file, result, passphrase=False, binary=False): + """Handle a call to GPG - pass input data, collect output data.""" + p = self._open_subprocess(args, passphrase) + if not binary: + stdin = codecs.getwriter(self._encoding)(p.stdin) + else: + stdin = p.stdin + if passphrase: + _util._write_passphrase(stdin, passphrase, self._encoding) + writer = _util._threaded_copy_data(file, stdin) + self._collect_output(p, result, writer, stdin) + return result + + def _recv_keys(self, keyids, keyserver=None): + """Import keys from a keyserver. + + :param str keyids: A space-delimited string containing the keyids to + request. + :param str keyserver: The keyserver to request the ``keyids`` from; + defaults to `gnupg.GPG.keyserver`. + """ + if not keyserver: + keyserver = self.keyserver + + args = ['--keyserver {0}'.format(keyserver), + '--recv-keys {0}'.format(keyids)] + log.info('Requesting keys from %s: %s' % (keyserver, keyids)) + + result = self._result_map['import'](self) + proc = self._open_subprocess(args) + self._collect_output(proc, result) + log.debug('recv_keys result: %r', result.__dict__) + return result + + def _sign_file(self, file, default_key=None, passphrase=None, + clearsign=True, detach=False, binary=False, + digest_algo='SHA512'): + """Create a signature for a file. + + :param file: The file stream (i.e. it's already been open()'d) to sign. 
+ :param str default_key: The key to sign with. + :param str passphrase: The passphrase to pipe to stdin. + :param bool clearsign: If True, create a cleartext signature. + :param bool detach: If True, create a detached signature. + :param bool binary: If True, do not ascii armour the output. + :param str digest_algo: The hash digest to use. Again, to see which + hashes your GnuPG is capable of using, do: + ``$ gpg --with-colons --list-config + digestname``. The default, if unspecified, is + ``'SHA512'``. + """ + log.debug("_sign_file():") + if binary: + log.info("Creating binary signature for file %s" % file) + args = ['--sign'] + else: + log.info("Creating ascii-armoured signature for file %s" % file) + args = ['--sign --armor'] + + if clearsign: + args.append("--clearsign") + if detach: + log.warn("Cannot use both --clearsign and --detach-sign.") + log.warn("Using default GPG behaviour: --clearsign only.") + elif detach and not clearsign: + args.append("--detach-sign") + + if default_key: + args.append(str("--default-key %s" % default_key)) + + args.append(str("--digest-algo %s" % digest_algo)) + + ## We could use _handle_io here except for the fact that if the + ## passphrase is bad, gpg bails and you can't write the message. + result = self._result_map['sign'](self) + + ## If the passphrase is an empty string, the message up to and + ## including its first newline will be cut off before making it to the + ## GnuPG process. Therefore, if the passphrase='' or passphrase=b'', + ## we set passphrase=None. 
See Issue #82: + ## https://github.com/isislovecruft/python-gnupg/issues/82 + if _util._is_string(passphrase): + passphrase = passphrase if len(passphrase) > 0 else None + elif _util._is_bytes(passphrase): + passphrase = s(passphrase) if len(passphrase) > 0 else None + else: + passphrase = None + + proc = self._open_subprocess(args, passphrase is not None) + try: + if passphrase: + _util._write_passphrase(proc.stdin, passphrase, self._encoding) + writer = _util._threaded_copy_data(file, proc.stdin) + except IOError as ioe: + log.exception("Error writing message: %s" % str(ioe)) + writer = None + self._collect_output(proc, result, writer, proc.stdin) + return result + + def _encrypt(self, data, recipients, + default_key=None, + passphrase=None, + armor=True, + encrypt=True, + symmetric=False, + always_trust=True, + output=None, + throw_keyids=False, + hidden_recipients=None, + cipher_algo='AES256', + digest_algo='SHA512', + compress_algo='ZLIB'): + """Encrypt the message read from the file-like object **data**. + + :param str data: The file or bytestream to encrypt. + + :param str recipients: The recipients to encrypt to. Recipients must + be specified keyID/fingerprint. + + .. warning:: Care should be taken in Python2 to make sure that the + given fingerprints for **recipients** are in fact strings + and not unicode objects. + + :param str default_key: The keyID/fingerprint of the key to use for + signing. If given, **data** will be encrypted + *and* signed. + + :param str passphrase: If given, and **default_key** is also given, + use this passphrase to unlock the secret + portion of the **default_key** to sign the + encrypted **data**. Otherwise, if + **default_key** is not given, but **symmetric** + is ``True``, then use this passphrase as the + passphrase for symmetric encryption. Signing + and symmetric encryption should *not* be + combined when sending the **data** to other + recipients, else the passphrase to the secret + key would be shared with them. 
+ + :param bool armor: If True, ascii armor the output; otherwise, the + output will be in binary format. (Default: True) + + :param bool encrypt: If True, encrypt the **data** using the + **recipients** public keys. (Default: True) + + :param bool symmetric: If True, encrypt the **data** to **recipients** + using a symmetric key. See the **passphrase** + parameter. Symmetric encryption and public key + encryption can be used simultaneously, and will + result in a ciphertext which is decryptable + with either the symmetric **passphrase** or one + of the corresponding private keys. + + :param bool always_trust: If True, ignore trust warnings on + **recipients** keys. If False, display trust + warnings. (default: True) + + :type output: str or file-like object + :param output: The output file to write to. If not specified, the + encrypted output is returned, and thus should be stored + as an object in Python. For example: + + >>> import shutil + >>> import gnupg + >>> if os.path.exists("doctests"): + ... shutil.rmtree("doctests") + >>> gpg = gnupg.GPG(homedir="doctests") + >>> key_settings = gpg.gen_key_input(key_type='RSA', + ... key_length=1024, + ... key_usage='ESCA', + ... passphrase='foo') + >>> key = gpg.gen_key(key_settings) + >>> message = "The crow flies at midnight." + >>> encrypted = str(gpg.encrypt(message, key.printprint)) + >>> assert encrypted != message + >>> assert not encrypted.isspace() + >>> decrypted = str(gpg.decrypt(encrypted)) + >>> assert not decrypted.isspace() + >>> decrypted + 'The crow flies at midnight.' + + + :param bool throw_keyids: If True, make all **recipients** keyids be + zero'd out in packet information. This is the same as using + **hidden_recipients** for all **recipients**. (Default: False). + + :param list hidden_recipients: A list of recipients that should have + their keyids zero'd out in packet information. + + :param str cipher_algo: The cipher algorithm to use. 
To see available + algorithms with your version of GnuPG, do: + :command:`$ gpg --with-colons --list-config + ciphername`. The default **cipher_algo**, if + unspecified, is ``'AES256'``. + + :param str digest_algo: The hash digest to use. Again, to see which + hashes your GnuPG is capable of using, do: + :command:`$ gpg --with-colons --list-config + digestname`. The default, if unspecified, is + ``'SHA512'``. + + :param str compress_algo: The compression algorithm to use. Can be one + of ``'ZLIB'``, ``'BZIP2'``, ``'ZIP'``, or + ``'Uncompressed'``. + """ + args = [] + + ## FIXME: GnuPG appears to ignore the --output directive when being + ## programmatically driven. We'll handle the IO ourselves to fix this + ## for now. + output_filename = None + if output: + if getattr(output, 'fileno', None) is not None: + ## avoid overwrite confirmation message + if getattr(output, 'name', None) is not None: + output_filename = output.name + if os.path.exists(output.name): + os.remove(output.name) + #args.append('--output %s' % output.name) + else: + output_filename = output + if os.path.exists(output): + os.remove(output) + #args.append('--output %s' % output) + + if armor: args.append('--armor') + if always_trust: args.append('--always-trust') + if cipher_algo: args.append('--cipher-algo %s' % cipher_algo) + if compress_algo: args.append('--compress-algo %s' % compress_algo) + + if default_key: + args.append('--sign') + args.append('--default-key %s' % default_key) + if digest_algo: + args.append('--digest-algo %s' % digest_algo) + + ## both can be used at the same time for an encrypted file which + ## is decryptable with a passphrase or secretkey. 
+ if symmetric: args.append('--symmetric') + if encrypt: args.append('--encrypt') + if throw_keyids: args.append('--throw-keyids') + + if len(recipients) >= 1: + log.debug("GPG.encrypt() called for recipients '%s' with type '%s'" + % (recipients, type(recipients))) + + if isinstance(recipients, (list, tuple)): + for recp in recipients: + if not _util._py3k: + if isinstance(recp, unicode): + try: + assert _parsers._is_hex(str(recp)) + except AssertionError: + log.info("Can't accept recipient string: %s" + % recp) + else: + self._add_recipient_string(args, hidden_recipients, str(recp)) + continue + ## will give unicode in 2.x as '\uXXXX\uXXXX' + if isinstance(hidden_recipients, (list, tuple)): + if [s for s in hidden_recipients if recp in str(s)]: + args.append('--hidden-recipient %r' % recp) + else: + args.append('--recipient %r' % recp) + else: + args.append('--recipient %r' % recp) + continue + if isinstance(recp, str): + self._add_recipient_string(args, hidden_recipients, recp) + + elif (not _util._py3k) and isinstance(recp, basestring): + for recp in recipients.split('\x20'): + self._add_recipient_string(args, hidden_recipients, recp) + + elif _util._py3k and isinstance(recp, str): + for recp in recipients.split(' '): + self._add_recipient_string(args, hidden_recipients, recp) + ## ...and now that we've proven py3k is better... + else: + log.debug("Don't know what to do with recipients: %r" + % recipients) + + result = self._result_map['crypt'](self) + log.debug("Got data '%s' with type '%s'." 
% (data, type(data))) + self._handle_io(args, data, result, passphrase=passphrase, binary=True) + # Avoid writing raw encrypted bytes to terminal loggers and breaking + # them in that adorable way where they spew hieroglyphics until reset: + if armor: + log.debug("\n%s" % result.data) + + if output_filename: + log.info("Writing encrypted output to file: %s" % output_filename) + with open(output_filename, 'wb') as fh: + fh.write(result.data) + fh.flush() + log.info("Encrypted output written successfully.") + + return result + + def _add_recipient_string(self, args, hidden_recipients, recipient): + if isinstance(hidden_recipients, (list, tuple)): + if [s for s in hidden_recipients if recipient in str(s)]: + args.append('--hidden-recipient %s' % recipient) + else: + args.append('--recipient %s' % recipient) + else: + args.append('--recipient %s' % recipient) diff --git a/gnupg/_parsers.py b/gnupg/_parsers.py new file mode 100644 index 0000000..9de57d2 --- /dev/null +++ b/gnupg/_parsers.py @@ -0,0 +1,1551 @@ +# -*- coding: utf-8 -*- +# +# This file is part of python-gnupg, a Python interface to GnuPG. +# Copyright © 2013 Isis Lovecruft, 0xA3ADB67A2CDB8B35 +# © 2013 Andrej B. +# © 2013 LEAP Encryption Access Project +# © 2008-2012 Vinay Sajip +# © 2005 Steve Traugott +# © 2004 A.M. Kuchling +# +# This program is free software: you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the included LICENSE file for details. + +'''Classes for parsing GnuPG status messages and sanitising commandline +options. 
def _check_keyserver(location):
    """Check that a given keyserver is a known protocol and does not contain
    shell escape characters.

    :param str location: A string containing the default keyserver. This
                         should contain the desired keyserver protocol which
                         is supported by the keyserver, for example, the
                         default is ``'hkp://wwwkeys .pgp.net'``.
    :rtype: :obj:`str` or :obj:`None`
    :returns: A string specifying the protocol and keyserver hostname, if the
              checks passed. If not, returns None.
    """
    known_schemes = ('hkp://', 'hkps://', 'http://', 'https://',
                     'ldap://', 'mailto:')
    for scheme in known_schemes:
        if not location.startswith(scheme):
            continue
        remainder = location.replace(scheme, str())
        hostname, _slash, trailing = remainder.partition('/')
        ## anything after the first '/' is dropped from the result
        if trailing:
            log.warn("URI text for %s: '%s'" % (hostname, trailing))
        log.debug("Got host string for keyserver setting: '%s'" % hostname)
        cleaned = _fix_unsafe(hostname)
        if cleaned:
            log.debug("Cleaned host string: '%s'" % cleaned)
            return scheme + cleaned
    return None
_grudgingly: https://www.schneier.com/blog/archives/2012/10/when_will_we_se.html + """ + if prefs is None: return + + cipher = frozenset(['AES256', 'AES192', 'AES128', + 'CAMELLIA256', 'CAMELLIA192', + 'TWOFISH', '3DES']) + digest = frozenset(['SHA512', 'SHA384', 'SHA256', 'SHA224', 'RMD160', + 'SHA1']) + compress = frozenset(['BZIP2', 'ZLIB', 'ZIP', 'Uncompressed']) + all = frozenset([cipher, digest, compress]) + + if isinstance(prefs, str): + prefs = set(prefs.split()) + elif isinstance(prefs, list): + prefs = set(prefs) + else: + msg = "prefs must be list of strings, or space-separated string" + log.error("parsers._check_preferences(): %s" % message) + raise TypeError(message) + + if not pref_type: + pref_type = 'all' + + allowed = str() + + if pref_type == 'cipher': + allowed += ' '.join(prefs.intersection(cipher)) + if pref_type == 'digest': + allowed += ' '.join(prefs.intersection(digest)) + if pref_type == 'compress': + allowed += ' '.join(prefs.intersection(compress)) + if pref_type == 'all': + allowed += ' '.join(prefs.intersection(all)) + + return allowed + +def _fix_unsafe(shell_input): + """Find characters used to escape from a string into a shell, and wrap them in + quotes if they exist. Regex pilfered from Python3 :mod:`shlex` module. + + :param str shell_input: The input intended for the GnuPG process. + """ + _unsafe = re.compile(r'[^\w@%+=:,./-]', 256) + try: + if len(_unsafe.findall(shell_input)) == 0: + return shell_input.strip() + else: + clean = "'" + shell_input.replace("'", "'\"'\"'") + "'" + return clean + except TypeError: + return None + +def _hyphenate(input, add_prefix=False): + """Change underscores to hyphens so that object attributes can be easily + tranlated to GPG option names. + + :param str input: The attribute to hyphenate. + :param bool add_prefix: If True, add leading hyphens to the input. + :rtype: str + :return: The ``input`` with underscores changed to hyphens. 
+ """ + ret = '--' if add_prefix else '' + ret += input.replace('_', '-') + return ret + +def _is_allowed(input): + """Check that an option or argument given to GPG is in the set of allowed + options, the latter being a strict subset of the set of all options known + to GPG. + + :param str input: An input meant to be parsed as an option or flag to the + GnuPG process. Should be formatted the same as an option + or flag to the commandline gpg, i.e. "--encrypt-files". + + :ivar frozenset gnupg_options: All known GPG options and flags. + + :ivar frozenset allowed: All allowed GPG options and flags, e.g. all GPG + options and flags which we are willing to + acknowledge and parse. If we want to support a + new option, it will need to have its own parsing + class and its name will need to be added to this + set. + + :raises: :exc:`UsageError` if **input** is not a subset of the hard-coded + set of all GnuPG options in :func:`_get_all_gnupg_options`. + + :exc:`ProtectedOption` if **input** is not in the set of allowed + options. + + :rtype: str + :return: The original **input** parameter, unmodified and unsanitized, if + no errors occur. + """ + gnupg_options = _get_all_gnupg_options() + allowed = _get_options_group("allowed") + + ## these are the allowed options we will handle so far, all others should + ## be dropped. 
def _is_allowed(input):
    """Check that an option or argument given to GPG is in the set of allowed
    options, the latter being a strict subset of the set of all options known
    to GPG.

    :param str input: An input meant to be parsed as an option or flag to the
                      GnuPG process. Should be formatted the same as an option
                      or flag to the commandline gpg, i.e. "--encrypt-files".

    :ivar frozenset gnupg_options: All known GPG options and flags.

    :ivar frozenset allowed: All allowed GPG options and flags, e.g. all GPG
                             options and flags which we are willing to
                             acknowledge and parse. If we want to support a
                             new option, it will need to have its own parsing
                             class and its name will need to be added to this
                             set.

    :raises: :exc:`UsageError` if **input** is not a subset of the hard-coded
             set of all GnuPG options in :func:`_get_all_gnupg_options`.

             :exc:`ProtectedOption` if **input** is not in the set of allowed
             options.

    :rtype: str
    :return: The original **input** parameter, unmodified and unsanitized, if
             no errors occur.
    """
    gnupg_options = _get_all_gnupg_options()
    allowed = _get_options_group("allowed")

    ## these are the allowed options we will handle so far, all others should
    ## be dropped. this dance is so that when new options are added later, we
    ## merely add the to the _allowed list, and the `` _allowed.issubset``
    ## assertion will check that GPG will recognise them
    try:
        ## check that allowed is a subset of all gnupg_options
        assert allowed.issubset(gnupg_options)
    except AssertionError:
        ## a sanity check on our own hard-coded tables, not on user input
        raise UsageError("'allowed' isn't a subset of known options, diff: %s"
                         % allowed.difference(gnupg_options))

    ## if we got a list of args, join them
    ##
    ## see TODO file, tag :cleanup:
    if not isinstance(input, str):
        input = ' '.join([x for x in input])

    if isinstance(input, str):
        ## NOTE(review): ``find('_') > 0`` deliberately skips a leading
        ## underscore at index 0 -- only interior underscores are hyphenated.
        if input.find('_') > 0:
            if not input.startswith('--'):
                hyphenated = _hyphenate(input, add_prefix=True)
            else:
                hyphenated = _hyphenate(input)
        else:
            hyphenated = input
        ## xxx we probably want to use itertools.dropwhile here
        try:
            assert hyphenated in allowed
        except AssertionError as ae:
            ## disallowed options are logged and refused, never passed on
            dropped = _fix_unsafe(hyphenated)
            log.warn("_is_allowed(): Dropping option '%s'..." % dropped)
            raise ProtectedOption("Option '%s' not supported." % dropped)
        else:
            ## note that the *original* spelling is returned, not the
            ## hyphenated form used for the membership check
            return input
    return None
+ """ + if _util._py3k: return isinstance(thing, str) + else: return isinstance(thing, basestring) + +def _sanitise(*args): + """Take an arg or the key portion of a kwarg and check that it is in the + set of allowed GPG options and flags, and that it has the correct + type. Then, attempt to escape any unsafe characters. If an option is not + allowed, drop it with a logged warning. Returns a dictionary of all + sanitised, allowed options. + + Each new option that we support that is not a boolean, but instead has + some additional inputs following it, i.e. "--encrypt-file foo.txt", will + need some basic safety checks added here. + + GnuPG has three-hundred and eighteen commandline flags. Also, not all + implementations of OpenPGP parse PGP packets and headers in the same way, + so there is added potential there for messing with calls to GPG. + + For information on the PGP message format specification, see + :rfc:`1991`. + + If you're asking, "Is this *really* necessary?": No, not really -- we could + just follow the security precautions recommended by `this xkcd`__. + + __ https://xkcd.com/1181/ + + :param str args: (optional) The boolean arguments which will be passed to + the GnuPG process. + :rtype: str + :returns: ``sanitised`` + """ + + ## see TODO file, tag :cleanup:sanitise: + + def _check_option(arg, value): + """Check that a single ``arg`` is an allowed option. + + If it is allowed, quote out any escape characters in ``value``, and + add the pair to :ivar:`sanitised`. Otherwise, drop them. + + :param str arg: The arguments which will be passed to the GnuPG + process, and, optionally their corresponding values. + The values are any additional arguments following the + GnuPG option or flag. For example, if we wanted to + pass ``"--encrypt --recipient isis@leap.se"`` to + GnuPG, then ``"--encrypt"`` would be an arg without a + value, and ``"--recipient"`` would also be an arg, + with a value of ``"isis@leap.se"``. 
+ + :ivar list checked: The sanitised, allowed options and values. + :rtype: str + :returns: A string of the items in ``checked``, delimited by spaces. + """ + checked = str() + none_options = _get_options_group("none_options") + hex_options = _get_options_group("hex_options") + hex_or_none_options = _get_options_group("hex_or_none_options") + + if not _util._py3k: + if not isinstance(arg, list) and isinstance(arg, unicode): + arg = str(arg) + + try: + flag = _is_allowed(arg) + assert flag is not None, "_check_option(): got None for flag" + except (AssertionError, ProtectedOption) as error: + log.warn("_check_option(): %s" % str(error)) + else: + checked += (flag + ' ') + + if _is_string(value): + values = value.split(' ') + for v in values: + ## these can be handled separately, without _fix_unsafe(), + ## because they are only allowed if they pass the regex + if (flag in none_options) and (v is None): + continue + + if flag in hex_options: + if _is_hex(v): checked += (v + " ") + else: + log.debug("'%s %s' not hex." 
% (flag, v)) + if (flag in hex_or_none_options) and (v is None): + log.debug("Allowing '%s' for all keys" % flag) + continue + + elif flag in ['--keyserver']: + host = _check_keyserver(v) + if host: + log.debug("Setting keyserver: %s" % host) + checked += (v + " ") + else: log.debug("Dropping keyserver: %s" % v) + continue + + ## the rest are strings, filenames, etc, and should be + ## shell escaped: + val = _fix_unsafe(v) + try: + assert not val is None + assert not val.isspace() + assert not v is None + assert not v.isspace() + except: + log.debug("Dropping %s %s" % (flag, v)) + continue + + if flag in ['--encrypt', '--encrypt-files', '--decrypt', + '--decrypt-files', '--import', '--verify']: + if ( (_util._is_file(val)) + or + ((flag == '--verify') and (val == '-')) ): + checked += (val + " ") + else: + log.debug("%s not file: %s" % (flag, val)) + + elif flag in ['--cipher-algo', '--personal-cipher-prefs', + '--personal-cipher-preferences']: + legit_algos = _check_preferences(val, 'cipher') + if legit_algos: checked += (legit_algos + " ") + else: log.debug("'%s' is not cipher" % val) + + elif flag in ['--compress-algo', '--compression-algo', + '--personal-compress-prefs', + '--personal-compress-preferences']: + legit_algos = _check_preferences(val, 'compress') + if legit_algos: checked += (legit_algos + " ") + else: log.debug("'%s' not compress algo" % val) + + else: + checked += (val + " ") + log.debug("_check_option(): No checks for %s" % val) + + return checked.rstrip(' ') + + is_flag = lambda x: x.startswith('--') + + def _make_filo(args_string): + filo = arg.split(' ') + filo.reverse() + log.debug("_make_filo(): Converted to reverse list: %s" % filo) + return filo + + def _make_groups(filo): + groups = {} + while len(filo) >= 1: + last = filo.pop() + if is_flag(last): + log.debug("Got arg: %s" % last) + if last == '--verify': + groups[last] = str(filo.pop()) + ## accept the read-from-stdin arg: + if len(filo) >= 1 and filo[len(filo)-1] == '-': + 
groups[last] += str(' - ') ## gross hack + filo.pop() + else: + groups[last] = str() + while len(filo) > 1 and not is_flag(filo[len(filo)-1]): + log.debug("Got value: %s" % filo[len(filo)-1]) + groups[last] += (filo.pop() + " ") + else: + if len(filo) == 1 and not is_flag(filo[0]): + log.debug("Got value: %s" % filo[0]) + groups[last] += filo.pop() + else: + log.warn("_make_groups(): Got solitary value: %s" % last) + groups["xxx"] = last + return groups + + def _check_groups(groups): + log.debug("Got groups: %s" % groups) + checked_groups = [] + for a,v in groups.items(): + v = None if len(v) == 0 else v + safe = _check_option(a, v) + if safe is not None and not safe.strip() == "": + log.debug("Appending option: %s" % safe) + checked_groups.append(safe) + else: + log.warn("Dropped option: '%s %s'" % (a,v)) + return checked_groups + + if args is not None: + option_groups = {} + for arg in args: + ## if we're given a string with a bunch of options in it split + ## them up and deal with them separately + if (not _util._py3k and isinstance(arg, basestring)) \ + or (_util._py3k and isinstance(arg, str)): + log.debug("Got arg string: %s" % arg) + if arg.find(' ') > 0: + filo = _make_filo(arg) + option_groups.update(_make_groups(filo)) + else: + option_groups.update({ arg: "" }) + elif isinstance(arg, list): + log.debug("Got arg list: %s" % arg) + arg.reverse() + option_groups.update(_make_groups(arg)) + else: + log.warn("Got non-str/list arg: '%s', type '%s'" + % (arg, type(arg))) + checked = _check_groups(option_groups) + sanitised = ' '.join(x for x in checked) + return sanitised + else: + log.debug("Got None for args") + +def _sanitise_list(arg_list): + """A generator for iterating through a list of gpg options and sanitising + them. + + :param list arg_list: A list of options and flags for GnuPG. + :rtype: generator + :returns: A generator whose next() method returns each of the items in + ``arg_list`` after calling ``_sanitise()`` with that item as a + parameter. 
+ """ + if isinstance(arg_list, list): + for arg in arg_list: + safe_arg = _sanitise(arg) + if safe_arg != "": + yield safe_arg + +def _get_options_group(group=None): + """Get a specific group of options which are allowed.""" + + #: These expect a hexidecimal keyid as their argument, and can be parsed + #: with :func:`_is_hex`. + hex_options = frozenset(['--check-sigs', + '--default-key', + '--default-recipient', + '--delete-keys', + '--delete-secret-keys', + '--delete-secret-and-public-keys', + '--desig-revoke', + '--export', + '--export-secret-keys', + '--export-secret-subkeys', + '--fingerprint', + '--gen-revoke', + '--hidden-encrypt-to', + '--hidden-recipient', + '--list-key', + '--list-keys', + '--list-public-keys', + '--list-secret-keys', + '--list-sigs', + '--recipient', + '--recv-keys', + '--send-keys', + ]) + #: These options expect value which are left unchecked, though still run + #: through :func:`_fix_unsafe`. + unchecked_options = frozenset(['--list-options', + '--passphrase-fd', + '--status-fd', + '--verify-options', + ]) + #: These have their own parsers and don't really fit into a group + other_options = frozenset(['--debug-level', + '--keyserver', + + ]) + #: These should have a directory for an argument + dir_options = frozenset(['--homedir', + ]) + #: These expect a keyring or keyfile as their argument + keyring_options = frozenset(['--keyring', + '--primary-keyring', + '--secret-keyring', + '--trustdb-name', + ]) + #: These expect a filename (or the contents of a file as a string) or None + #: (meaning that they read from stdin) + file_or_none_options = frozenset(['--decrypt', + '--decrypt-files', + '--encrypt', + '--encrypt-files', + '--import', + '--verify', + '--verify-files', + '--output', + ]) + #: These options expect a string. see :func:`_check_preferences`. 
+ pref_options = frozenset(['--digest-algo', + '--cipher-algo', + '--compress-algo', + '--compression-algo', + '--cert-digest-algo', + '--personal-digest-prefs', + '--personal-digest-preferences', + '--personal-cipher-prefs', + '--personal-cipher-preferences', + '--personal-compress-prefs', + '--personal-compress-preferences', + '--print-md', + ]) + #: These options expect no arguments + none_options = frozenset(['--always-trust', + '--armor', + '--armour', + '--batch', + '--check-sigs', + '--check-trustdb', + '--clearsign', + '--debug-all', + '--default-recipient-self', + '--detach-sign', + '--export', + '--export-ownertrust', + '--export-secret-keys', + '--export-secret-subkeys', + '--fingerprint', + '--fixed-list-mode', + '--gen-key', + '--import-ownertrust', + '--list-config', + '--list-key', + '--list-keys', + '--list-packets', + '--list-public-keys', + '--list-secret-keys', + '--list-sigs', + '--lock-multiple', + '--lock-never', + '--lock-once', + '--no-default-keyring', + '--no-default-recipient', + '--no-emit-version', + '--no-options', + '--no-tty', + '--no-use-agent', + '--no-verbose', + '--print-mds', + '--quiet', + '--sign', + '--symmetric', + '--throw-keyids', + '--use-agent', + '--verbose', + '--version', + '--with-colons', + '--yes', + ]) + #: These options expect either None or a hex string + hex_or_none_options = hex_options.intersection(none_options) + allowed = hex_options.union(unchecked_options, other_options, dir_options, + keyring_options, file_or_none_options, + pref_options, none_options) + + if group and group in locals().keys(): + return locals()[group] + +def _get_all_gnupg_options(): + """Get all GnuPG options and flags. + + This is hardcoded within a local scope to reduce the chance of a tampered + GnuPG binary reporting falsified option sets, i.e. 
because certain options + (namedly the ``--no-options`` option, which prevents the usage of gpg.conf + files) are necessary and statically specified in + :meth:`gnupg._meta.GPGBase._make_args`, if the inputs into Python are + already controlled, and we were to summon the GnuPG binary to ask it for + its options, it would be possible to receive a falsified options set + missing the ``--no-options`` option in response. This seems unlikely, and + the method is stupid and ugly, but at least we'll never have to debug + whether or not an option *actually* disappeared in a different GnuPG + version, or some funny business is happening. + + These are the options as of GnuPG 1.4.12; the current stable branch of the + 2.1.x tree contains a few more -- if you need them you'll have to add them + in here. + + :type gnupg_options: frozenset + :ivar gnupg_options: All known GPG options and flags. + :rtype: frozenset + :returns: ``gnupg_options`` + """ + three_hundred_eighteen = (""" +--allow-freeform-uid --multifile +--allow-multiple-messages --no +--allow-multisig-verification --no-allow-freeform-uid +--allow-non-selfsigned-uid --no-allow-multiple-messages +--allow-secret-key-import --no-allow-non-selfsigned-uid +--always-trust --no-armor +--armor --no-armour +--armour --no-ask-cert-expire +--ask-cert-expire --no-ask-cert-level +--ask-cert-level --no-ask-sig-expire +--ask-sig-expire --no-auto-check-trustdb +--attribute-fd --no-auto-key-locate +--attribute-file --no-auto-key-retrieve +--auto-check-trustdb --no-batch +--auto-key-locate --no-comments +--auto-key-retrieve --no-default-keyring +--batch --no-default-recipient +--bzip2-compress-level --no-disable-mdc +--bzip2-decompress-lowmem --no-emit-version +--card-edit --no-encrypt-to +--card-status --no-escape-from-lines +--cert-digest-algo --no-expensive-trust-checks +--cert-notation --no-expert +--cert-policy-url --no-force-mdc +--change-pin --no-force-v3-sigs +--charset --no-force-v4-certs +--check-sig --no-for-your-eyes-only 
+--check-sigs --no-greeting +--check-trustdb --no-groups +--cipher-algo --no-literal +--clearsign --no-mangle-dos-filenames +--command-fd --no-mdc-warning +--command-file --no-options +--comment --no-permission-warning +--completes-needed --no-pgp2 +--compress-algo --no-pgp6 +--compression-algo --no-pgp7 +--compress-keys --no-pgp8 +--compress-level --no-random-seed-file +--compress-sigs --no-require-backsigs +--ctapi-driver --no-require-cross-certification +--dearmor --no-require-secmem +--dearmour --no-rfc2440-text +--debug --no-secmem-warning +--debug-all --no-show-notation +--debug-ccid-driver --no-show-photos +--debug-level --no-show-policy-url +--decrypt --no-sig-cache +--decrypt-files --no-sig-create-check +--default-cert-check-level --no-sk-comments +--default-cert-expire --no-strict +--default-cert-level --notation-data +--default-comment --not-dash-escaped +--default-key --no-textmode +--default-keyserver-url --no-throw-keyid +--default-preference-list --no-throw-keyids +--default-recipient --no-tty +--default-recipient-self --no-use-agent +--default-sig-expire --no-use-embedded-filename +--delete-keys --no-utf8-strings +--delete-secret-and-public-keys --no-verbose +--delete-secret-keys --no-version +--desig-revoke --openpgp +--detach-sign --options +--digest-algo --output +--disable-ccid --override-session-key +--disable-cipher-algo --passphrase +--disable-dsa2 --passphrase-fd +--disable-mdc --passphrase-file +--disable-pubkey-algo --passphrase-repeat +--display --pcsc-driver +--display-charset --personal-cipher-preferences +--dry-run --personal-cipher-prefs +--dump-options --personal-compress-preferences +--edit-key --personal-compress-prefs +--emit-version --personal-digest-preferences +--enable-dsa2 --personal-digest-prefs +--enable-progress-filter --pgp2 +--enable-special-filenames --pgp6 +--enarmor --pgp7 +--enarmour --pgp8 +--encrypt --photo-viewer +--encrypt-files --pipemode +--encrypt-to --preserve-permissions +--escape-from-lines 
--primary-keyring +--exec-path --print-md +--exit-on-status-write-error --print-mds +--expert --quick-random +--export --quiet +--export-options --reader-port +--export-ownertrust --rebuild-keydb-caches +--export-secret-keys --recipient +--export-secret-subkeys --recv-keys +--fast-import --refresh-keys +--fast-list-mode --remote-user +--fetch-keys --require-backsigs +--fingerprint --require-cross-certification +--fixed-list-mode --require-secmem +--fix-trustdb --rfc1991 +--force-mdc --rfc2440 +--force-ownertrust --rfc2440-text +--force-v3-sigs --rfc4880 +--force-v4-certs --run-as-shm-coprocess +--for-your-eyes-only --s2k-cipher-algo +--gen-key --s2k-count +--gen-prime --s2k-digest-algo +--gen-random --s2k-mode +--gen-revoke --search-keys +--gnupg --secret-keyring +--gpg-agent-info --send-keys +--gpgconf-list --set-filename +--gpgconf-test --set-filesize +--group --set-notation +--help --set-policy-url +--hidden-encrypt-to --show-keyring +--hidden-recipient --show-notation +--homedir --show-photos +--honor-http-proxy --show-policy-url +--ignore-crc-error --show-session-key +--ignore-mdc-error --sig-keyserver-url +--ignore-time-conflict --sign +--ignore-valid-from --sign-key +--import --sig-notation +--import-options --sign-with +--import-ownertrust --sig-policy-url +--interactive --simple-sk-checksum +--keyid-format --sk-comments +--keyring --skip-verify +--keyserver --status-fd +--keyserver-options --status-file +--lc-ctype --store +--lc-messages --strict +--limit-card-insert-tries --symmetric +--list-config --temp-directory +--list-key --textmode +--list-keys --throw-keyid +--list-only --throw-keyids +--list-options --trustdb-name +--list-ownertrust --trusted-key +--list-packets --trust-model +--list-public-keys --try-all-secrets +--list-secret-keys --ttyname +--list-sig --ttytype +--list-sigs --ungroup +--list-trustdb --update-trustdb +--load-extension --use-agent +--local-user --use-embedded-filename +--lock-multiple --user +--lock-never --utf8-strings 
def nodata(status_code):
    """Translate NODATA status codes from GnuPG to messages.

    :param status_code: The code reported after ``NODATA`` on the status fd.
    :rtype: str or None
    :returns: The English description, or None for unknown codes.
    """
    lookup = {
        '1': 'No armored data.',
        '2': 'Expected a packet but did not find one.',
        '3': 'Invalid packet found, this may indicate a non OpenPGP message.',
        '4': 'Signature expected but not found.' }
    ## a dict lookup replaces the original manual scan over items()
    return lookup.get(str(status_code))

def progress(status_code):
    """Translate PROGRESS status codes from GnuPG to messages.

    :param status_code: The code reported after ``PROGRESS`` on the status fd.
    :rtype: str or None
    :returns: The English description, or None for unknown codes.
    """
    lookup = {
        'pk_dsa': 'DSA key generation',
        'pk_elg': 'Elgamal key generation',
        'primegen': 'Prime generation',
        'need_entropy': 'Waiting for new entropy in the RNG',
        'tick': 'Generic tick without any special meaning - still working.',
        'starting_agent': 'A gpg-agent was started.',
        'learncard': 'gpg-agent or gpgsm is learning the smartcard data.',
        'card_busy': 'A smartcard is still working.' }
    return lookup.get(str(status_code))
+ """ + def __init__(self, gpg): + self._gpg = gpg + ## this should get changed to something more useful, like 'key_type' + #: 'P':= primary, 'S':= subkey, 'B':= both + self.type = None + self.fingerprint = None + self.status = None + self.subkey_created = False + self.primary_created = False + #: This will store the key's public keyring filename, if + #: :meth:`~gnupg.GPG.gen_key_input` was called with + #: ``separate_keyring=True``. + self.keyring = None + #: This will store the key's secret keyring filename, if : + #: :meth:`~gnupg.GPG.gen_key_input` was called with + #: ``separate_keyring=True``. + self.secring = None + + def __nonzero__(self): + if self.fingerprint: return True + return False + __bool__ = __nonzero__ + + def __str__(self): + if self.fingerprint: + return self.fingerprint + else: + if self.status is not None: + return self.status + else: + return False + + def _handle_status(self, key, value): + """Parse a status code from the attached GnuPG process. + + :raises: :exc:`~exceptions.ValueError` if the status message is unknown. 
+ """ + if key in ("GOOD_PASSPHRASE"): + pass + elif key == "KEY_NOT_CREATED": + self.status = 'key not created' + elif key == "KEY_CREATED": + (self.type, self.fingerprint) = value.split() + self.status = 'key created' + elif key == "NODATA": + self.status = nodata(value) + elif key == "PROGRESS": + self.status = progress(value.split(' ', 1)[0]) + else: + raise ValueError("Unknown status message: %r" % key) + + if self.type in ('B', 'P'): + self.primary_created = True + if self.type in ('B', 'S'): + self.subkey_created = True + +class DeleteResult(object): + """Handle status messages for --delete-keys and --delete-secret-keys""" + def __init__(self, gpg): + self._gpg = gpg + self.status = 'ok' + + def __str__(self): + return self.status + + problem_reason = { '1': 'No such key', + '2': 'Must delete secret key first', + '3': 'Ambigious specification', } + + def _handle_status(self, key, value): + """Parse a status code from the attached GnuPG process. + + :raises: :exc:`~exceptions.ValueError` if the status message is unknown. + """ + if key == "DELETE_PROBLEM": + self.status = self.problem_reason.get(value, "Unknown error: %r" + % value) + else: + raise ValueError("Unknown status message: %r" % key) + +class Sign(object): + """Parse GnuPG status messages for signing operations. + + :param gpg: An instance of :class:`gnupg.GPG`. + """ + + #: The type of signature created. + sig_type = None + #: The algorithm used to create the signature. + sig_algo = None + #: The hash algorithm used to create the signature. + sig_hash_also = None + #: The fingerprint of the signing keyid. + fingerprint = None + #: The timestamp on the signature. + timestamp = None + #: xxx fill me in + what = None + status = None + + def __init__(self, gpg): + self._gpg = gpg + + def __nonzero__(self): + """Override the determination for truthfulness evaluation. + + :rtype: bool + :returns: True if we have a valid signature, False otherwise. 
+ """ + return self.fingerprint is not None + __bool__ = __nonzero__ + + def __str__(self): + return self.data.decode(self._gpg._encoding, self._gpg._decode_errors) + + def _handle_status(self, key, value): + """Parse a status code from the attached GnuPG process. + + :raises: :exc:`~exceptions.ValueError` if the status message is unknown. + """ + if key in ("USERID_HINT", "NEED_PASSPHRASE", "BAD_PASSPHRASE", + "GOOD_PASSPHRASE", "MISSING_PASSPHRASE", "PINENTRY_LAUNCHED", + "BEGIN_SIGNING", "CARDCTRL", "INV_SGNR", "SIGEXPIRED"): + self.status = key.replace("_", " ").lower() + elif key == "SIG_CREATED": + (self.sig_type, self.sig_algo, self.sig_hash_algo, + self.what, self.timestamp, self.fingerprint) = value.split() + elif key == "KEYEXPIRED": + self.status = "skipped signing key, key expired" + if (value is not None) and (len(value) > 0): + self.status += " on {}".format(str(value)) + elif key == "KEYREVOKED": + self.status = "skipped signing key, key revoked" + if (value is not None) and (len(value) > 0): + self.status += " on {}".format(str(value)) + elif key == "NODATA": + self.status = nodata(value) + else: + raise ValueError("Unknown status message: %r" % key) + + +class ListKeys(list): + """Handle status messages for --list-keys. + + Handles pub and uid (relating the latter to the former). Don't care about + the following attributes/status messages (from doc/DETAILS): + + | crt = X.509 certificate + | crs = X.509 certificate and private key available + | ssb = secret subkey (secondary key) + | uat = user attribute (same as user id except for field 10). 
class ListKeys(list):
    """Handle status messages for --list-keys.

    Handles pub and uid (relating the latter to the former). Don't care about
    the following attributes/status messages (from doc/DETAILS):

    | crt = X.509 certificate
    | crs = X.509 certificate and private key available
    | ssb = secret subkey (secondary key)
    | uat = user attribute (same as user id except for field 10).
    | rev = revocation signature
    | pkd = public key data (special field format, see below)
    | grp = reserved for gpgsm
    | rvk = revocation key
    """

    def __init__(self, gpg):
        super(ListKeys, self).__init__()
        self._gpg = gpg
        #: Dict for the key record currently being assembled.
        self.curkey = None
        #: The uid most recently seen; sig records attach to it.
        self.curuid = None
        #: Fingerprints of every key listed, in order.
        self.fingerprints = []
        #: Every uid seen across all keys.
        self.uids = []
        #: Map of uid -> set of keyids that signed it.
        self.sigs = {}

    def key(self, args):
        """Start a new key record from a colon-delimited pub/sec line."""
        ## NOTE(review): ``vars`` shadows the builtin of the same name
        vars = ("""
            type trust length algo keyid date expires dummy ownertrust uid
        """).split()
        self.curkey = {}
        for i in range(len(vars)):
            self.curkey[vars[i]] = args[i]
        self.curkey['uids'] = []
        self.curkey['sigs'] = {}
        ## some listings carry the first uid on the pub line itself
        if self.curkey['uid']:
            self.curuid = self.curkey['uid']
            self.curkey['uids'].append(self.curuid)
            self.sigs[self.curuid] = set()
            self.curkey['sigs'][self.curuid] = []
        del self.curkey['uid']
        self.curkey['subkeys'] = []
        self.append(self.curkey)

    ## 'pub' and 'sec' records are parsed identically
    pub = sec = key

    def fpr(self, args):
        """Record the fingerprint (field 10) for the current key."""
        self.curkey['fingerprint'] = args[9]
        self.fingerprints.append(args[9])

    def uid(self, args):
        """Attach a uid record (field 10) to the current key."""
        uid = args[9]
        ## un-escape gpg's \xNN escapes back to characters
        uid = ESCAPE_PATTERN.sub(lambda m: chr(int(m.group(1), 16)), uid)
        self.curkey['uids'].append(uid)
        self.curuid = uid
        self.curkey['sigs'][uid] = []
        self.sigs[uid] = set()
        self.uids.append(uid)

    def sig(self, args):
        """Attach a signature record to the most recently seen uid."""
        vars = ("""
            type trust length algo keyid date expires dummy ownertrust uid
        """).split()
        sig = {}
        for i in range(len(vars)):
            sig[vars[i]] = args[i]
        self.curkey['sigs'][self.curuid].append(sig)
        self.sigs[self.curuid].add(sig['keyid'])

    def sub(self, args):
        """Record a subkey as [algo (field 5), keyid-ish (field 12)]."""
        subkey = [args[4], args[11]]
        self.curkey['subkeys'].append(subkey)

    def _handle_status(self, key, value):
        ## listing output carries no interesting status-fd messages
        pass
+ """ + self._gpg = gpg + + #: A map from GnuPG codes shown with the ``IMPORT_OK`` status message + #: to their human-meaningful English equivalents. + self._ok_reason = {'0': 'Not actually changed', + '1': 'Entirely new key', + '2': 'New user IDs', + '4': 'New signatures', + '8': 'New subkeys', + '16': 'Contains private key', + '17': 'Contains private key',} + + #: A map from GnuPG codes shown with the ``IMPORT_PROBLEM`` status + #: message to their human-meaningful English equivalents. + self._problem_reason = { '0': 'No specific reason given', + '1': 'Invalid Certificate', + '2': 'Issuer Certificate missing', + '3': 'Certificate Chain too long', + '4': 'Error storing certificate', } + + #: All the possible status messages pertaining to actions taken while + #: importing a key. + self._fields = '''count no_user_id imported imported_rsa unchanged + n_uids n_subk n_sigs n_revoc sec_read sec_imported sec_dups + not_imported'''.split() + + #: Counts of all the status message results, :data:`_fields` which + #: have appeared. + self.counts = OrderedDict( + zip(self._fields, [int(0) for x in range(len(self._fields))])) + + #: A list of strings containing the fingerprints of the GnuPG keyIDs + #: imported. + self.fingerprints = list() + + #: A list containing dictionaries with information gathered on keys + #: imported. + self.results = list() + + def __nonzero__(self): + """Override the determination for truthfulness evaluation. + + :rtype: bool + :returns: True if we have immport some keys, False otherwise. + """ + if self.counts['not_imported'] > 0: return False + if len(self.fingerprints) == 0: return False + return True + __bool__ = __nonzero__ + + def _handle_status(self, key, value): + """Parse a status code from the attached GnuPG process. + + :raises ValueError: if the status message is unknown. 
+ """ + if key == "IMPORTED": + # this duplicates info we already see in import_ok & import_problem + pass + elif key == "NODATA": + self.results.append({'fingerprint': None, + 'status': 'No valid data found'}) + elif key == "IMPORT_OK": + reason, fingerprint = value.split() + reasons = [] + for code, text in self._ok_reason.items(): + if int(reason) == int(code): + reasons.append(text) + reasontext = '\n'.join(reasons) + "\n" + self.results.append({'fingerprint': fingerprint, + 'status': reasontext}) + self.fingerprints.append(fingerprint) + elif key == "IMPORT_PROBLEM": + try: + reason, fingerprint = value.split() + except: + reason = value + fingerprint = '' + self.results.append({'fingerprint': fingerprint, + 'status': self._problem_reason[reason]}) + elif key == "IMPORT_RES": + import_res = value.split() + for x in self.counts.keys(): + self.counts[x] = int(import_res.pop(0)) + elif key == "KEYEXPIRED": + res = {'fingerprint': None, + 'status': 'Key expired'} + self.results.append(res) + ## Accoring to docs/DETAILS L859, SIGEXPIRED is obsolete: + ## "Removed on 2011-02-04. This is deprecated in favor of KEYEXPIRED." + elif key == "SIGEXPIRED": + res = {'fingerprint': None, + 'status': 'Signature expired'} + self.results.append(res) + else: + raise ValueError("Unknown status message: %r" % key) + + def summary(self): + l = [] + l.append('%d imported' % self.counts['imported']) + if self.counts['not_imported']: + l.append('%d not imported' % self.counts['not_imported']) + return ', '.join(l) + + +class Verify(object): + """Parser for status messages from GnuPG for certifications and signature + verifications. + + People often mix these up, or think that they are the same thing. While it + is true that certifications and signatures *are* the same cryptographic + operation -- and also true that both are the same as the decryption + operation -- a distinction is made for important reasons. 
+ + A certification: + * is made on a key, + * can help to validate or invalidate the key owner's identity, + * can assign trust levels to the key (or to uids and/or subkeys that + the key contains), + * and can be used in absense of in-person fingerprint checking to try + to build a path (through keys whose fingerprints have been checked) + to the key, so that the identity of the key's owner can be more + reliable without having to actually physically meet in person. + + A signature: + * is created for a file or other piece of data, + * can help to prove that the data hasn't been altered, + * and can help to prove that the data was sent by the person(s) in + possession of the private key that created the signature, and for + parsing portions of status messages from decryption operations. + + There are probably other things unique to each that have been + scatterbrainedly omitted due to the programmer sitting still and staring + at GnuPG debugging logs for too long without snacks, but that is the gist + of it. + """ + + TRUST_UNDEFINED = 0 + TRUST_NEVER = 1 + TRUST_MARGINAL = 2 + TRUST_FULLY = 3 + TRUST_ULTIMATE = 4 + + TRUST_LEVELS = {"TRUST_UNDEFINED" : TRUST_UNDEFINED, + "TRUST_NEVER" : TRUST_NEVER, + "TRUST_MARGINAL" : TRUST_MARGINAL, + "TRUST_FULLY" : TRUST_FULLY, + "TRUST_ULTIMATE" : TRUST_ULTIMATE,} + + def __init__(self, gpg): + """Create a parser for verification and certification commands. + + :param gpg: An instance of :class:`gnupg.GPG`. + """ + self._gpg = gpg + #: True if the signature is valid, False otherwise. + self.valid = False + #: A string describing the status of the signature verification. + #: Can be one of ``signature bad``, ``signature good``, + #: ``signature valid``, ``signature error``, ``decryption failed``, + #: ``no public key``, ``key exp``, or ``key rev``. + self.status = None + #: The fingerprint of the signing keyid. 
+ self.fingerprint = None + #: The fingerprint of the corresponding public key, which may be + #: different if the signature was created with a subkey. + self.pubkey_fingerprint = None + #: The keyid of the signing key. + self.key_id = None + #: The id of the signature itself. + self.signature_id = None + #: The creation date of the signing key. + self.creation_date = None + #: The timestamp of the purported signature, if we are unable to parse + #: and/or validate it. + self.timestamp = None + #: The timestamp for when the valid signature was created. + self.sig_timestamp = None + #: The userid of the signing key which was used to create the + #: signature. + self.username = None + #: When the signing key is due to expire. + self.expire_timestamp = None + #: An integer 0-4 describing the trust level of the signature. + self.trust_level = None + #: The string corresponding to the ``trust_level`` number. + self.trust_text = None + #: The subpackets. These are stored as a dictionary, in the following + #: form: + #: Verify.subpackets = {'SUBPACKET_NUMBER': {'flags': FLAGS, + #: 'length': LENGTH, + #: 'data': DATA}, + #: 'ANOTHER_SUBPACKET_NUMBER': {...}} + self.subpackets = {} + #: The signature or key notations. These are also stored as a + #: dictionary, in the following form: + #: + #: Verify.notations = {NOTATION_NAME: NOTATION_DATA} + #: + #: For example, the Bitcoin core developer, Peter Todd, encodes in + #: every signature the header of the latest block on the Bitcoin + #: blockchain (to prove that a GnuPG signature that Peter made was made + #: *after* a specific point in time). These look like: + #: + #: gpg: Signature notation: blockhash@bitcoin.org=000000000000000006f793d4461ee3e756ff04cc62581c96a42ed67dc233da3a + #: + #: Which python-gnupg would store as: + #: + #: Verify.notations['blockhash@bitcoin.org'] = '000000000000000006f793d4461ee3e756ff04cc62581c96a42ed67dc233da3a' + self.notations = {} + + #: This will be a str or None. 
If not None, it is the last + #: ``NOTATION_NAME`` we stored in the ``notations`` dict. Because we're + #: not assured that a ``NOTATION_DATA`` status will arrive *immediately* + #: after its corresponding ``NOTATION_NAME``, we store the latest + #: ``NOTATION_NAME`` here until we get its corresponding + #: ``NOTATION_DATA``. + self._last_notation_name = None + + def __nonzero__(self): + """Override the determination for truthfulness evaluation. + + :rtype: bool + :returns: True if we have a valid signature, False otherwise. + """ + return self.valid + __bool__ = __nonzero__ + + def _handle_status(self, key, value): + """Parse a status code from the attached GnuPG process. + + :raises: :exc:`~exceptions.ValueError` if the status message is unknown. + """ + if key in self.TRUST_LEVELS: + self.trust_text = key + self.trust_level = self.TRUST_LEVELS[key] + elif key in ("RSA_OR_IDEA", "NODATA", "IMPORT_RES", "PLAINTEXT", + "PLAINTEXT_LENGTH", "POLICY_URL", "DECRYPTION_INFO", + "DECRYPTION_OKAY", "INV_SGNR", "PROGRESS", + "PINENTRY_LAUNCHED"): + pass + elif key == "BADSIG": + self.valid = False + self.status = 'signature bad' + self.key_id, self.username = value.split(None, 1) + elif key == "GOODSIG": + self.valid = True + self.status = 'signature good' + self.key_id, self.username = value.split(None, 1) + elif key == "VALIDSIG": + self.valid = True + (self.fingerprint, + self.creation_date, + self.sig_timestamp, + self.expire_timestamp) = value.split()[:4] + # may be different if signature is made with a subkey + self.pubkey_fingerprint = value.split()[-1] + self.status = 'signature valid' + elif key == "SIG_ID": + (self.signature_id, + self.creation_date, self.timestamp) = value.split() + elif key == "ERRSIG": + self.valid = False + (self.key_id, + algo, hash_algo, + cls, + self.timestamp) = value.split()[:5] + self.status = 'signature error' + elif key == "DECRYPTION_FAILED": + self.valid = False + self.key_id = value + self.status = 'decryption failed' + elif key == 
"NO_PUBKEY": + self.valid = False + self.key_id = value + self.status = 'no public key' + # These are useless in Verify, since they are spit out for any + # pub/subkeys on the key, not just the one doing the signing. + # if we want to check for signatures make with expired key, + # the relevant flags are REVKEYSIG and KEYREVOKED. + elif key in ("KEYEXPIRED", "SIGEXPIRED"): + pass + # The signature has an expiration date which has already passed + # (EXPKEYSIG), or the signature has been revoked (REVKEYSIG): + elif key in ("EXPKEYSIG", "REVKEYSIG"): + self.valid = False + self.key_id = value.split()[0] + self.status = (('%s %s') % (key[:3], key[3:])).lower() + # This is super annoying, and bad design on the part of GnuPG, in my + # opinion. + # + # This flag can get triggered if a valid signature is made, and then + # later the key (or subkey) which created the signature is + # revoked. When this happens, GnuPG will output: + # + # REVKEYSIG 075BFD18B365D34C Test Expired Key + # VALIDSIG DAB69B05F591640B7F4DCBEA075BFD18B365D34C 2014-09-26 1411700539 0 4 0 1 2 00 4BA800F77452A6C29447FF20F4AF76ACBBE22CE2 + # KEYREVOKED + # + # Meaning that we have a timestamp for when the signature was created, + # and we know that the signature is valid, but since GnuPG gives us no + # timestamp for when the key was revoked... we have no ability to + # determine if the valid signature was made *before* the signing key + # was revoked or *after*. Meaning that if you are like me and you sign + # all your software releases and git commits, and you also practice + # good opsec by doing regular key rotations, your old signatures made + # by your expired/revoked keys (even though they were created when the + # key was still good) are considered bad because GnuPG is a + # braindamaged piece of shit. + # + # Software engineering, motherfuckers, DO YOU SPEAK IT? 
+ # + # The signing key which created the signature has since been revoked + # (KEYREVOKED), and we're going to ignore it (but add something to the + # status message): + elif key in ("KEYREVOKED"): + self.status = '\n'.join([self.status, "key revoked"]) + # SIG_SUBPACKET + # This indicates that a signature subpacket was seen. The format is + # the same as the "spk" record above. + # + # [...] + # + # SPK - Signature subpacket records + # + # - Field 2 :: Subpacket number as per RFC-4880 and later. + # - Field 3 :: Flags in hex. Currently the only two bits assigned + # are 1, to indicate that the subpacket came from the + # hashed part of the signature, and 2, to indicate the + # subpacket was marked critical. + # - Field 4 :: Length of the subpacket. Note that this is the + # length of the subpacket, and not the length of field + # 5 below. Due to the need for %-encoding, the length + # of field 5 may be up to 3x this value. + # - Field 5 :: The subpacket data. Printable ASCII is shown as + # ASCII, but other values are rendered as %XX where XX + # is the hex value for the byte. + elif key in ("SIG_SUBPACKET"): + fields = value.split() + try: + subpacket_number = fields[0] + self.subpackets[subpacket_number] = {'flags': None, + 'length': None, + 'data': None} + except IndexError: + # We couldn't parse the subpacket type (an RFC4880 + # identifier), so we shouldn't continue parsing. + pass + else: + # Pull as much data as we can parse out of the subpacket: + try: + self.subpackets[subpacket_number]['flags'] = fields[1] + self.subpackets[subpacket_number]['length'] = fields[2] + self.subpackets[subpacket_number]['data'] = fields[3] + except IndexError: + pass + # NOTATION_ + # There are actually two related status codes to convey notation + # data: + # + # - NOTATION_NAME + # - NOTATION_DATA + # + # and are %XX escaped; the data may be split among + # several NOTATION_DATA lines. 
+ elif key.startswith("NOTATION_"): + if key.endswith("NAME"): + self.notations[value] = str() + self._last_notation_name = value + elif key.endswith("DATA"): + if self._last_notation_name is not None: + # Append the NOTATION_DATA to any previous data we + # received for that NOTATION_NAME: + self.notations[self._last_notation_name] += value + else: + pass + else: + raise ValueError("Unknown status message: %r" % key) + + +class Crypt(Verify): + """Parser for internal status messages from GnuPG for ``--encrypt``, + ``--decrypt``, and ``--decrypt-files``. + """ + def __init__(self, gpg): + Verify.__init__(self, gpg) + self._gpg = gpg + #: A string containing the encrypted or decrypted data. + self.data = '' + #: True if the decryption/encryption process turned out okay. + self.ok = False + #: A string describing the current processing status, or error, if one + #: has occurred. + self.status = None + self.data_format = None + self.data_timestamp = None + self.data_filename = None + + def __nonzero__(self): + if self.ok: return True + return False + __bool__ = __nonzero__ + + def __str__(self): + """The str() method for a :class:`Crypt` object will automatically return the + decoded data string, which stores the encryped or decrypted data. + + In other words, these two statements are equivalent: + + >>> assert decrypted.data == str(decrypted) + + """ + return self.data.decode(self._gpg._encoding, self._gpg._decode_errors) + + def _handle_status(self, key, value): + """Parse a status code from the attached GnuPG process. + + :raises: :exc:`~exceptions.ValueError` if the status message is unknown. 
+ """ + if key in ("ENC_TO", "USERID_HINT", "GOODMDC", "END_DECRYPTION", + "BEGIN_SIGNING", "NO_SECKEY", "ERROR", "NODATA", + "CARDCTRL"): + # in the case of ERROR, this is because a more specific error + # message will have come first + pass + elif key in ("NEED_PASSPHRASE", "BAD_PASSPHRASE", "GOOD_PASSPHRASE", + "MISSING_PASSPHRASE", "DECRYPTION_FAILED", + "KEY_NOT_CREATED"): + self.status = key.replace("_", " ").lower() + elif key == "NEED_TRUSTDB": + self._gpg._create_trustdb() + elif key == "NEED_PASSPHRASE_SYM": + self.status = 'need symmetric passphrase' + elif key == "BEGIN_DECRYPTION": + self.status = 'decryption incomplete' + elif key == "BEGIN_ENCRYPTION": + self.status = 'encryption incomplete' + elif key == "DECRYPTION_OKAY": + self.status = 'decryption ok' + self.ok = True + elif key == "END_ENCRYPTION": + self.status = 'encryption ok' + self.ok = True + elif key == "INV_RECP": + self.status = 'invalid recipient' + elif key == "KEYEXPIRED": + self.status = 'key expired' + elif key == "KEYREVOKED": + self.status = 'key revoked' + elif key == "SIG_CREATED": + self.status = 'sig created' + elif key == "SIGEXPIRED": + self.status = 'sig expired' + elif key == "PLAINTEXT": + fmt, dts = value.split(' ', 1) + if dts.find(' ') > 0: + self.data_timestamp, self.data_filename = dts.split(' ', 1) + else: + self.data_timestamp = dts + ## GnuPG gives us a hex byte for an ascii char corresponding to + ## the data format of the resulting plaintext, + ## i.e. '62'→'b':= binary data + self.data_format = chr(int(str(fmt), 16)) + else: + super(Crypt, self)._handle_status(key, value) + +class ListPackets(object): + """Handle status messages for --list-packets.""" + + def __init__(self, gpg): + self._gpg = gpg + #: A string describing the current processing status, or error, if one + #: has occurred. + self.status = None + #: True if the passphrase to a public/private keypair is required. 
+ self.need_passphrase = None + #: True if a passphrase for a symmetric key is required. + self.need_passphrase_sym = None + #: The keyid and uid which this data is encrypted to. + self.userid_hint = None + #: The first key that we detected that a message was encrypted + #: to. This is provided for backwards compatibility. As of Issue #77_, + #: the ``encrypted_to`` attribute should be used instead. + self.key = None + #: A list of keyid's that the message has been encrypted to. + self.encrypted_to = [] + + def _handle_status(self, key, value): + """Parse a status code from the attached GnuPG process. + + :raises: :exc:`~exceptions.ValueError` if the status message is unknown. + """ + if key in ('NO_SECKEY', 'BEGIN_DECRYPTION', 'DECRYPTION_FAILED', + 'END_DECRYPTION', 'GOOD_PASSPHRASE', 'BAD_PASSPHRASE'): + pass + elif key == 'NODATA': + self.status = nodata(value) + elif key == 'ENC_TO': + key, _, _ = value.split() + if not self.key: + self.key = key + self.encrypted_to.append(key) + elif key == ('NEED_PASSPHRASE', 'MISSING_PASSPHRASE'): + self.need_passphrase = True + elif key == 'NEED_PASSPHRASE_SYM': + self.need_passphrase_sym = True + elif key == 'USERID_HINT': + self.userid_hint = value.strip().split() + else: + raise ValueError("Unknown status message: %r" % key) diff --git a/gnupg/_trust.py b/gnupg/_trust.py new file mode 100644 index 0000000..224e7b6 --- /dev/null +++ b/gnupg/_trust.py @@ -0,0 +1,103 @@ +# -*- coding: utf-8 -*- +# +# This file is part of python-gnupg, a Python interface to GnuPG. +# Copyright © 2013 Isis Lovecruft, 0xA3ADB67A2CDB8B35 +# © 2013 Andrej B. +# © 2013 LEAP Encryption Access Project +# © 2008-2012 Vinay Sajip +# © 2005 Steve Traugott +# © 2004 A.M. Kuchling +# +# This program is free software: you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) +# any later version. 
+# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the included LICENSE file for details. + +'''Functions for handling trustdb and trust calculations. + +The functions within this module take an instance of :class:`gnupg.GPGBase` or +a suitable subclass as their first argument. +''' + +from __future__ import absolute_import + +import os + +from . import _util +from ._util import log + +def _create_trustdb(cls): + """Create the trustdb file in our homedir, if it doesn't exist.""" + trustdb = os.path.join(cls.homedir, 'trustdb.gpg') + if not os.path.isfile(trustdb): + log.info("GnuPG complained that your trustdb file was missing. %s" + % "This is likely due to changing to a new homedir.") + log.info("Creating trustdb.gpg file in your GnuPG homedir.") + cls.fix_trustdb(trustdb) + +def export_ownertrust(cls, trustdb=None): + """Export ownertrust to a trustdb file. + + If there is already a file named :file:`trustdb.gpg` in the current GnuPG + homedir, it will be renamed to :file:`trustdb.gpg.bak`. + + :param string trustdb: The path to the trustdb.gpg file. If not given, + defaults to ``'trustdb.gpg'`` in the current GnuPG + homedir. + """ + if trustdb is None: + trustdb = os.path.join(cls.homedir, 'trustdb.gpg') + + try: + os.rename(trustdb, trustdb + '.bak') + except (OSError, IOError) as err: + log.debug(str(err)) + + export_proc = cls._open_subprocess(['--export-ownertrust']) + tdb = open(trustdb, 'wb') + _util._threaded_copy_data(export_proc.stdout, tdb) + +def import_ownertrust(self, trustdb=None): + """Import ownertrust from a trustdb file. + + :param str trustdb: The path to the trustdb.gpg file. If not given, + defaults to :file:`trustdb.gpg` in the current GnuPG + homedir. 
+ """ + if trustdb is None: + trustdb = os.path.join(cls.homedir, 'trustdb.gpg') + + import_proc = cls._open_subprocess(['--import-ownertrust']) + tdb = open(trustdb, 'rb') + _util._threaded_copy_data(tdb, import_proc.stdin) + +def fix_trustdb(cls, trustdb=None): + """Attempt to repair a broken trustdb.gpg file. + + GnuPG>=2.0.x has this magical-seeming flag: `--fix-trustdb`. You'd think + it would fix the the trustdb. Hah! It doesn't. Here's what it does + instead:: + + (gpg)~/code/python-gnupg $ gpg2 --fix-trustdb + gpg: You may try to re-create the trustdb using the commands: + gpg: cd ~/.gnupg + gpg: gpg2 --export-ownertrust > otrust.tmp + gpg: rm trustdb.gpg + gpg: gpg2 --import-ownertrust < otrust.tmp + gpg: If that does not work, please consult the manual + + Brilliant piece of software engineering right there. + + :param str trustdb: The path to the trustdb.gpg file. If not given, + defaults to :file:`trustdb.gpg` in the current GnuPG + homedir. + """ + if trustdb is None: + trustdb = os.path.join(cls.homedir, 'trustdb.gpg') + export_proc = cls._open_subprocess(['--export-ownertrust']) + import_proc = cls._open_subprocess(['--import-ownertrust']) + _util._threaded_copy_data(export_proc.stdout, import_proc.stdin) diff --git a/gnupg/_util.py b/gnupg/_util.py new file mode 100644 index 0000000..79855ac --- /dev/null +++ b/gnupg/_util.py @@ -0,0 +1,795 @@ +# -*- coding: utf-8 -*- +# +# This file is part of python-gnupg, a Python interface to GnuPG. +# Copyright © 2013 Isis Lovecruft, 0xA3ADB67A2CDB8B35 +# © 2013 Andrej B. +# © 2013 LEAP Encryption Access Project +# © 2008-2012 Vinay Sajip +# © 2005 Steve Traugott +# © 2004 A.M. Kuchling +# +# This program is free software: you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) +# any later version. 
+# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the included LICENSE file for details. + +'''Extra utilities for python-gnupg.''' + +from __future__ import absolute_import +from datetime import datetime +from socket import gethostname +from time import localtime +from time import mktime + +import codecs +import encodings +import os +import threading +import random +import re +import string +import sys + +# These are all the classes which are stream-like; they are used in +# :func:`_is_stream`. +_STREAMLIKE_TYPES = [] + +# These StringIO classes are actually utilised. +try: + import io + from io import StringIO + from io import BytesIO +except ImportError: + from cStringIO import StringIO +else: + # The io.IOBase type covers the above example for an open file handle in + # Python3, as well as both io.BytesIO and io.StringIO. + _STREAMLIKE_TYPES.append(io.IOBase) + +# The remaining StringIO classes which are imported are used to determine if a +# object is a stream-like in :func:`_is_stream`. +if 2 == sys.version_info[0]: + # Import the StringIO class from the StringIO module since it is a + # commonly used stream class. It is distinct from either of the + # StringIO's that may be loaded in the above try/except clause, so the + # name is prefixed with an underscore to distinguish it. + from StringIO import StringIO as _StringIO_StringIO + _STREAMLIKE_TYPES.append(_StringIO_StringIO) + + # Import the cStringIO module to test for the cStringIO stream types, + # InputType and OutputType. 
See + # http://stackoverflow.com/questions/14735295/to-check-an-instance-is-stringio + import cStringIO as _cStringIO + _STREAMLIKE_TYPES.append(_cStringIO.InputType) + _STREAMLIKE_TYPES.append(_cStringIO.OutputType) + + # In Python2: + # + # >>> type(open('README.md', 'rb')) + # + # + # whereas, in Python3, the `file` builtin doesn't exist and instead we get: + # + # >>> type(open('README.md', 'rb')) + # <_io.BufferedReader name='README.md'> + # + # which is covered by the above addition of io.IOBase. + _STREAMLIKE_TYPES.append(file) + + +from . import _logger + + +try: + unicode + _py3k = False + try: + isinstance(__name__, basestring) + except NameError: + msg = "Sorry, python-gnupg requires a Python version with proper" + msg += " unicode support. Please upgrade to Python>=2.6." + raise SystemExit(msg) +except NameError: + _py3k = True + +_running_windows = False +if "win" in sys.platform: + _running_windows = True + +## Directory shortcuts: +## we don't want to use this one because it writes to the install dir: +#_here = getabsfile(currentframe()).rsplit(os.path.sep, 1)[0] +_here = os.path.join(os.getcwd(), 'gnupg') ## current dir +_test = os.path.join(os.path.join(_here, 'test'), 'tmp') ## ./tests/tmp +_user = os.environ.get('HOME') ## $HOME + +# Fix for Issue #74: we shouldn't expect that a $HOME directory is set in all +# environs. https://github.com/isislovecruft/python-gnupg/issues/74 +if not _user: + _user = '/tmp/python-gnupg' + try: + os.makedirs(_user) + except (OSError, IOError): + _user = os.getcwd() + # If we can't use $HOME, but we have (or can create) a + # /tmp/python-gnupg/gnupghome directory, then we'll default to using + # that. Otherwise, we'll use the current directory + /gnupghome. 
+ _user = os.path.sep.join([_user, 'gnupghome']) + +_ugpg = os.path.join(_user, '.gnupg') ## $HOME/.gnupg +_conf = os.path.join(os.path.join(_user, '.config'), 'python-gnupg') + ## $HOME/.config/python-gnupg + +## Logger is disabled by default +log = _logger.create_logger(0) + +#: Compiled regex for determining a GnuPG binary's version: +_VERSION_STRING_REGEX = re.compile('(\d)*(\.)*(\d)*(\.)*(\d)*') + + +def find_encodings(enc=None, system=False): + """Find functions for encoding translations for a specific codec. + + :param str enc: The codec to find translation functions for. It will be + normalized by converting to lowercase, excluding + everything which is not ascii, and hyphens will be + converted to underscores. + + :param bool system: If True, find encodings based on the system's stdin + encoding, otherwise assume utf-8. + + :raises: :exc:LookupError if the normalized codec, ``enc``, cannot be + found in Python's encoding translation map. + """ + if not enc: + enc = 'utf-8' + + if system: + if getattr(sys.stdin, 'encoding', None) is None: + enc = sys.stdin.encoding + log.debug("Obtained encoding from stdin: %s" % enc) + else: + enc = 'ascii' + + ## have to have lowercase to work, see + ## http://docs.python.org/dev/library/codecs.html#standard-encodings + enc = enc.lower() + codec_alias = encodings.normalize_encoding(enc) + + codecs.register(encodings.search_function) + coder = codecs.lookup(codec_alias) + + return coder + + +if _py3k: + def b(x): + """See http://python3porting.com/problems.html#nicer-solutions""" + coder = find_encodings() + if isinstance(x, bytes): + return coder.encode(x.decode(coder.name))[0] + else: + return coder.encode(x)[0] + + def s(x): + if isinstance(x, str): + return x + elif isinstance(x, (bytes, bytearray)): + return x.decode(find_encodings().name) + else: + raise NotImplemented +else: + def b(x): + """See http://python3porting.com/problems.html#nicer-solutions""" + return x + + def s(x): + if isinstance(x, basestring): + 
return x + elif isinstance(x, (bytes, bytearray)): + return x.decode(find_encodings().name) + else: + raise NotImplemented + +def binary(data): + coder = find_encodings() + + if _py3k and isinstance(data, bytes): + encoded = coder.encode(data.decode(coder.name))[0] + elif _py3k and isinstance(data, str): + encoded = coder.encode(data)[0] + elif not _py3k and type(data) is not str: + encoded = coder.encode(data)[0] + else: + encoded = data + + return encoded + + +def author_info(name, contact=None, public_key=None): + """Easy object-oriented representation of contributor info. + + :param str name: The contributor´s name. + :param str contact: The contributor´s email address or contact + information, if given. + :param str public_key: The contributor´s public keyid, if given. + """ + return Storage(name=name, contact=contact, public_key=public_key) + +def _copy_data(instream, outstream): + """Copy data from one stream to another. + + :type instream: :class:`io.BytesIO` or :class:`io.StringIO` or file + :param instream: A byte stream or open file to read from. + :param file outstream: The file descriptor of a tmpfile to write to. + """ + sent = 0 + + while True: + if ((_py3k and isinstance(instream, str)) or + (not _py3k and isinstance(instream, basestring))): + data = instream[:1024] + instream = instream[1024:] + else: + data = instream.read(1024) + if len(data) == 0: + break + + sent += len(data) + encoded = binary(data) + log.debug("Sending %d bytes of data..." 
% sent) + log.debug("Encoded data (type %s):\n%s" % (type(encoded), encoded)) + + if not _py3k: + try: + outstream.write(encoded) + except IOError as ioe: + # Can get 'broken pipe' errors even when all data was sent + if 'Broken pipe' in str(ioe): + log.error('Error sending data: Broken pipe') + else: + log.exception(ioe) + break + else: + log.debug("Wrote data type to outstream.") + else: + try: + outstream.write(bytes(encoded)) + except TypeError as te: + # XXX FIXME This appears to happen because + # _threaded_copy_data() sometimes passes the `outstream` as an + # object with type <_io.BufferredWriter> and at other times + # with type . We hit the + # following error when the `outstream` has type + # . + if not "convert 'bytes' object to str implicitly" in str(te): + log.error(str(te)) + try: + outstream.write(encoded.decode()) + except TypeError as yate: + # We hit the "'str' does not support the buffer interface" + # error in Python3 when the `outstream` is an io.BytesIO and + # we try to write a str to it. We don't care about that + # error, we'll just try again with bytes. + if not "does not support the buffer interface" in str(yate): + log.error(str(yate)) + except IOError as ioe: + # Can get 'broken pipe' errors even when all data was sent + if 'Broken pipe' in str(ioe): + log.error('Error sending data: Broken pipe') + else: + log.exception(ioe) + break + else: + log.debug("Wrote data type outstream.") + except IOError as ioe: + # Can get 'broken pipe' errors even when all data was sent + if 'Broken pipe' in str(ioe): + log.error('Error sending data: Broken pipe') + else: + log.exception(ioe) + break + else: + log.debug("Wrote data type to outstream.") + + try: + outstream.close() + except IOError as ioe: + log.error("Unable to close outstream %s:\r\t%s" % (outstream, ioe)) + else: + log.debug("Closed outstream: %d bytes sent." % sent) + +def _create_if_necessary(directory): + """Create the specified directory, if necessary. 
+ + :param str directory: The directory to use. + :rtype: bool + :returns: True if no errors occurred and the directory was created or + existed beforehand, False otherwise. + """ + + if not os.path.isabs(directory): + log.debug("Got non-absolute path: %s" % directory) + directory = os.path.abspath(directory) + + if not os.path.isdir(directory): + log.info("Creating directory: %s" % directory) + try: + os.makedirs(directory, 0x1C0) + except OSError as ose: + log.error(ose, exc_info=1) + return False + else: + log.debug("Created directory.") + return True + +def create_uid_email(username=None, hostname=None): + """Create an email address suitable for a UID on a GnuPG key. + + :param str username: The username portion of an email address. If None, + defaults to the username of the running Python + process. + + :param str hostname: The FQDN portion of an email address. If None, the + hostname is obtained from gethostname(2). + + :rtype: str + :returns: A string formatted as @. + """ + if hostname: + hostname = hostname.replace(' ', '_') + if not username: + try: username = os.environ['LOGNAME'] + except KeyError: username = os.environ['USERNAME'] + + if not hostname: hostname = gethostname() + + uid = "%s@%s" % (username.replace(' ', '_'), hostname) + else: + username = username.replace(' ', '_') + if (not hostname) and (username.find('@') == 0): + uid = "%s@%s" % (username, gethostname()) + elif hostname: + uid = "%s@%s" % (username, hostname) + else: + uid = username + + return uid + +def _deprefix(line, prefix, callback=None): + """Remove the prefix string from the beginning of line, if it exists. + + :param string line: A line, such as one output by GnuPG's status-fd. + :param string prefix: A substring to remove from the beginning of + ``line``. Case insensitive. + :type callback: callable + :param callback: Function to call if the prefix is found. The signature to + callback will be only one argument, the ``line`` without the ``prefix``, i.e. 
+ ``callback(line)``. + :rtype: string + :returns: If the prefix was found, the ``line`` without the prefix is + returned. Otherwise, the original ``line`` is returned. + """ + try: + assert line.upper().startswith(u''.join(prefix).upper()) + except AssertionError: + log.debug("Line doesn't start with prefix '%s':\n%s" % (prefix, line)) + return line + else: + newline = line[len(prefix):] + if callback is not None: + try: + callback(newline) + except Exception as exc: + log.exception(exc) + return newline + +def _find_binary(binary=None): + """Find the absolute path to the GnuPG binary. + + Also run checks that the binary is not a symlink, and check that + our process real uid has exec permissions. + + :param str binary: The path to the GnuPG binary. + :raises: :exc:`~exceptions.RuntimeError` if it appears that GnuPG is not + installed. + :rtype: str + :returns: The absolute path to the GnuPG binary to use, if no exceptions + occur. + """ + found = None + if binary is not None: + if os.path.isabs(binary) and os.path.isfile(binary): + return binary + if not os.path.isabs(binary): + try: + found = _which(binary) + log.debug("Found potential binary paths: %s" + % '\n'.join([path for path in found])) + found = found[0] + except IndexError as ie: + log.info("Could not determine absolute path of binary: '%s'" + % binary) + elif os.access(binary, os.X_OK): + found = binary + if found is None: + try: found = _which('gpg', abspath_only=True, disallow_symlinks=True)[0] + except IndexError as ie: + log.error("Could not find binary for 'gpg'.") + try: found = _which('gpg2')[0] + except IndexError as ie: + log.error("Could not find binary for 'gpg2'.") + if found is None: + raise RuntimeError("GnuPG is not installed!") + + return found + +def _has_readwrite(path): + """ + Determine if the real uid/gid of the executing user has read and write + permissions for a directory or a file. + + :param str path: The path to the directory or file to check permissions + for. 
+ :rtype: bool + :returns: True if real uid/gid has read+write permissions, False otherwise. + """ + return os.access(path, os.R_OK ^ os.W_OK) + +def _is_file(filename): + """Check that the size of the thing which is supposed to be a filename has + size greater than zero, without following symbolic links or using + :func:os.path.isfile. + + :param filename: An object to check. + :rtype: bool + :returns: True if **filename** is file-like, False otherwise. + """ + try: + statinfo = os.lstat(filename) + log.debug("lstat(%r) with type=%s gave us %r" + % (repr(filename), type(filename), repr(statinfo))) + if not (statinfo.st_size > 0): + raise ValueError("'%s' appears to be an empty file!" % filename) + except OSError as oserr: + log.error(oserr) + if filename == '-': + log.debug("Got '-' for filename, assuming sys.stdin...") + return True + except (ValueError, TypeError, IOError) as err: + log.error(err) + else: + return True + return False + +def _is_stream(input): + """Check that the input is a byte stream. + + :param input: An object provided for reading from or writing to. + :rtype: bool + :returns: True if :param:input is a stream, False if otherwise. + """ + return isinstance(input, tuple(_STREAMLIKE_TYPES)) + +def _is_string(thing): + """Check that **thing** is a string. The definition of the latter depends + upon the Python version. + + :param thing: The thing to check if it's a string. + :rtype: bool + :returns: ``True`` if **thing** is string (or unicode in Python2). + """ + if (_py3k and isinstance(thing, str)): + return True + if (not _py3k and isinstance(thing, basestring)): + return True + return False + +def _is_bytes(thing): + """Check that **thing** is bytes. + + :param thing: The thing to check if it's bytes. + :rtype: bool + :returns: ``True`` if **thing** is bytes or a bytearray. + """ + if isinstance(thing, (bytes, bytearray)): + return True + return False + +def _is_list_or_tuple(instance): + """Check that ``instance`` is a list or tuple. 
+ + :param instance: The object to type check. + :rtype: bool + :returns: True if ``instance`` is a list or tuple, False otherwise. + """ + return isinstance(instance, (list, tuple,)) + +def _is_gpg1(version): + """Returns True if using GnuPG version 1.x. + + :param tuple version: A tuple of three integers indication major, minor, + and micro version numbers. + """ + (major, minor, micro) = _match_version_string(version) + if major == 1: + return True + return False + +def _is_gpg2(version): + """Returns True if using GnuPG version 2.x. + + :param tuple version: A tuple of three integers indication major, minor, + and micro version numbers. + """ + (major, minor, micro) = _match_version_string(version) + if major == 2: + return True + return False + +def _make_binary_stream(thing, encoding=None, armor=True): + """Encode **thing**, then make it stream/file-like. + + :param thing: The thing to turn into a encoded stream. + :rtype: ``io.BytesIO`` or ``io.StringIO``. + :returns: The encoded **thing**, wrapped in an ``io.BytesIO`` (if + available), otherwise wrapped in a ``io.StringIO``. + """ + if _py3k: + if isinstance(thing, str): + thing = thing.encode(encoding) + else: + if type(thing) is not str: + thing = thing.encode(encoding) + + try: + rv = BytesIO(thing) + except NameError: + rv = StringIO(thing) + + return rv + +def _make_passphrase(length=None, save=False, file=None): + """Create a passphrase and write it to a file that only the user can read. + + This is not very secure, and should not be relied upon for actual key + passphrases. + + :param int length: The length in bytes of the string to generate. + + :param file file: The file to save the generated passphrase in. If not + given, defaults to 'passphrase--' in the top-level directory. 
+ """ + if not length: + length = 40 + + passphrase = _make_random_string(length) + + if save: + ruid, euid, suid = os.getresuid() + gid = os.getgid() + now = mktime(localtime()) + + if not file: + filename = str('passphrase-%s-%s' % uid, now) + file = os.path.join(_repo, filename) + + with open(file, 'a') as fh: + fh.write(passphrase) + fh.flush() + fh.close() + os.chmod(file, stat.S_IRUSR | stat.S_IWUSR) + os.chown(file, ruid, gid) + + log.warn("Generated passphrase saved to %s" % file) + return passphrase + +def _make_random_string(length): + """Returns a random lowercase, uppercase, alphanumerical string. + + :param int length: The length in bytes of the string to generate. + """ + chars = string.ascii_lowercase + string.ascii_uppercase + string.digits + return ''.join(random.choice(chars) for x in range(length)) + +def _match_version_string(version): + """Sort a binary version string into major, minor, and micro integers. + + :param str version: A version string in the form x.x.x + """ + matched = _VERSION_STRING_REGEX.match(version) + g = matched.groups() + major, minor, micro = int(g[0]), int(g[2]), int(g[4]) + return (major, minor, micro) + +def _next_year(): + """Get the date of today plus one year. + + :rtype: str + :returns: The date of this day next year, in the format '%Y-%m-%d'. + """ + now = datetime.now().__str__() + date = now.split(' ', 1)[0] + year, month, day = date.split('-', 2) + next_year = str(int(year)+1) + return '-'.join((next_year, month, day)) + +def _now(): + """Get a timestamp for right now, formatted according to ISO 8601.""" + return datetime.isoformat(datetime.now()) + +def _separate_keyword(line): + """Split the line, and return (first_word, the_rest).""" + try: + first, rest = line.split(None, 1) + except ValueError: + first = line.strip() + rest = '' + return first, rest + +def _threaded_copy_data(instream, outstream): + """Copy data from one stream to another in a separate thread. 
This function will also find files
+ """ + def _can_allow(p): + if not os.access(p, flags): + return False + if abspath_only and not os.path.abspath(p): + log.warn('Ignoring %r (path is not absolute)', p) + return False + if disallow_symlinks and os.path.islink(p): + log.warn('Ignoring %r (path is a symlink)', p) + return False + return True + + result = [] + exts = filter(None, os.environ.get('PATHEXT', '').split(os.pathsep)) + path = os.environ.get('PATH', None) + if path is None: + return [] + for p in os.environ.get('PATH', '').split(os.pathsep): + p = os.path.join(p, executable) + if _can_allow(p): + result.append(p) + for e in exts: + pext = p + e + if _can_allow(pext): + result.append(pext) + return result + +def _write_passphrase(stream, passphrase, encoding): + """Write the passphrase from memory to the GnuPG process' stdin. + + :type stream: file, :class:`~io.BytesIO`, or :class:`~io.StringIO` + :param stream: The input file descriptor to write the password to. + :param str passphrase: The passphrase for the secret key material. + :param str encoding: The data encoding expected by GnuPG. Usually, this + is ``sys.getfilesystemencoding()``. 
+ """ + passphrase = '%s\n' % passphrase + passphrase = passphrase.encode(encoding) + stream.write(passphrase) + log.debug("Wrote passphrase on stdin.") + + +class InheritableProperty(object): + """Based on the emulation of PyProperty_Type() in Objects/descrobject.c""" + + def __init__(self, fget=None, fset=None, fdel=None, doc=None): + self.fget = fget + self.fset = fset + self.fdel = fdel + self.__doc__ = doc + + def __get__(self, obj, objtype=None): + if obj is None: + return self + if self.fget is None: + raise AttributeError("unreadable attribute") + if self.fget.__name__ == '' or not self.fget.__name__: + return self.fget(obj) + else: + return getattr(obj, self.fget.__name__)() + + def __set__(self, obj, value): + if self.fset is None: + raise AttributeError("can't set attribute") + if self.fset.__name__ == '' or not self.fset.__name__: + self.fset(obj, value) + else: + getattr(obj, self.fset.__name__)(value) + + def __delete__(self, obj): + if self.fdel is None: + raise AttributeError("can't delete attribute") + if self.fdel.__name__ == '' or not self.fdel.__name__: + self.fdel(obj) + else: + getattr(obj, self.fdel.__name__)() + + +class Storage(dict): + """A dictionary where keys are stored as class attributes. 
+ def __repr__(self): + return '<Storage ' + dict.__repr__(self) + '>'
+# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the included LICENSE file for details. + +'''copyleft.py +-------------- +Copyright information for python-gnupg. +''' + +from __future__ import absolute_import + +from . import _util + + +authors = { 'lovecruft_isis': _util.author_info( + 'Isis Agora Lovecruft', 'isis@leap.se', '0xA3ADB67A2CDB8B35'), + + 'sajip_vinay': _util.author_info( + 'Vinay Sajip', 'vinay.sajip@gmail.com', '0xDE6EF0B2'), + + 'traugott_steve': _util.author_info( + 'Steve Traugott', 'stevegt@terraluna.org'), + + 'kuchling_am': _util.author_info( + 'A.M. Kuchling', 'amk@amk.ca'), } + +copyright = """\ +Copyright © 2013 Isis Lovecruft, 0xA3ADB67A2CDB8B35 + © 2013 Andrej B. + © 2013 LEAP Encryption Access Project + © 2008-2012 Vinay Sajip + © 2005 Steve Traugott + © 2004 A.M. Kuchling +All rights reserved. +See included LICENSE or ``print(gnupg.__license__)`` for full license.""" + +disclaimer = """\ +This file is part of python-gnupg, a Python wrapper around GnuPG. +%s + +This program is free software: you can redistribute it and/or modify it +under the terms of the GNU General Public License as published by the Free +Software Foundation, either version 3 of the License, or (at your option) +any later version. + +This program is distributed in the hope that it will be useful, but WITHOUT +ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +FITNESS FOR A PARTICULAR PURPOSE. See the included LICENSE file for details. +""" % (copyright,) + +txcopyright = """\ +Where stated, parts of this program were taken from Twisted, which is +licensed as follows: + +Twisted, the Framework of Your Internet +Copyright © 2001-2013 Twisted Matrix Laboratories. +See LICENSE for details. 
+ +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.""" + + +GPLv3_text = """\ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. 
+ + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software. For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. 
Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. 
+ + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. 
+ + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. 
Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. 
+ + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. 
If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. 
+ + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the 
material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. 
+ + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. 
If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. 
+ + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. 
The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. 
THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + + BEGIN ORIGINAL LICENSE TEXT + +Copyright (c) 2008-2012 by Vinay Sajip. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ * The name(s) of the copyright holder(s) may not be used to endorse or + promote products derived from this software without specific prior + written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDER(S) "AS IS" AND ANY EXPRESS OR +IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO +EVENT SHALL THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY DIRECT, INDIRECT, +INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF +ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + END ORIGINAL LICENSE TEXT +""" + +full_text = "%s\n\n%s\n\n%s" % (disclaimer, txcopyright, GPLv3_text) diff --git a/gnupg/gnupg.py b/gnupg/gnupg.py new file mode 100644 index 0000000..215233e --- /dev/null +++ b/gnupg/gnupg.py @@ -0,0 +1,1080 @@ +# -*- coding: utf-8 -*- +# +# This file is part of python-gnupg, a Python interface to GnuPG. +# Copyright © 2013 Isis Lovecruft, 0xA3ADB67A2CDB8B35 +# © 2013 Andrej B. +# © 2013 LEAP Encryption Access Project +# © 2008-2012 Vinay Sajip +# © 2005 Steve Traugott +# © 2004 A.M. Kuchling +# +# This program is free software: you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the Free +# Software Foundation, either version 3 of the License, or (at your option) +# any later version. +# +# This program is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the included LICENSE file for details. + +"""gnupg.py +=========== +A Python interface to GnuPG. + +.. 
moduleauthor:: Isis Lovecruft + see also :attr:`gnupg.__authors__` +.. license:: see :attr:`gnupg.__license__` +.. info:: https://github.com/isislovecruft/python-gnupg +""" + +from __future__ import absolute_import +from codecs import open as open + +import encodings +import functools +import os +import re +import textwrap + +#: see :pep:`328` http://docs.python.org/2.5/whatsnew/pep-328.html +from . import _util +from . import _trust +from ._meta import GPGBase +from ._parsers import _fix_unsafe +from ._util import _is_list_or_tuple +from ._util import _is_stream +from ._util import _make_binary_stream +from ._util import log + + +class GPG(GPGBase): + """Python interface for handling interactions with GnuPG, including keyfile + generation, keyring maintainance, import and export, encryption and + decryption, sending to and recieving from keyservers, and signing and + verification. + """ + + #: The number of simultaneous keyids we should list operations like + #: '--list-sigs' to: + _batch_limit = 25 + + def __init__(self, binary=None, homedir=None, verbose=False, + use_agent=False, keyring=None, secring=None, + ignore_homedir_permissions=False, options=None): + """Initialize a GnuPG process wrapper. + + :param str binary: Name for GnuPG binary executable. If the absolute + path is not given, the environment variable + ``$PATH`` is searched for the executable and + checked that the real uid/gid of the user has + sufficient permissions. + + :param str homedir: Full pathname to directory containing the public + and private keyrings. Default is whatever GnuPG + defaults to. + + :type ignore_homedir_permissions: :obj:`bool` + :param ignore_homedir_permissions: If true, bypass check that homedir + be writable. + + :type verbose: :obj:`str` or :obj:`int` or :obj:`bool` + :param verbose: String or numeric value to pass to GnuPG's + ``--debug-level`` option. See the GnuPG man page for + the list of valid options. 
If False, debug output is + not generated by the GnuPG binary. If True, defaults + to ``--debug-level basic.`` + + :param str keyring: Name of keyring file containing public key data. + If unspecified, defaults to :file:`pubring.gpg` in + the **homedir** directory. + + :param str secring: Name of alternative secret keyring file to use. If + left unspecified, this will default to using + :file:`secring.gpg` in the **homedir** directory, + and create that file if it does not exist. + + :param list options: A list of additional options to pass to the GnuPG + binary. + + :raises: A :exc:`~exceptions.RuntimeError` with explanation message + if there is a problem invoking GnuPG. + + Example: + + >>> import gnupg + GnuPG logging disabled... + >>> gpg = gnupg.GPG(homedir='doctests') + >>> gpg.keyring + './doctests/pubring.gpg' + >>> gpg.secring + './doctests/secring.gpg' + >>> gpg.use_agent + False + >>> gpg.binary + '/usr/bin/gpg' + """ + + super(GPG, self).__init__( + binary=binary, + home=homedir, + keyring=keyring, + secring=secring, + options=options, + verbose=verbose, + use_agent=use_agent, + ignore_homedir_permissions=ignore_homedir_permissions, + ) + + log.info(textwrap.dedent(""" + Initialised settings: + binary: %s + binary version: %s + homedir: %s + ignore_homedir_permissions: %s + keyring: %s + secring: %s + default_preference_list: %s + keyserver: %s + options: %s + verbose: %s + use_agent: %s + """ % (self.binary, + self.binary_version, + self.homedir, + self.ignore_homedir_permissions, + self.keyring, + self.secring, + self.default_preference_list, + self.keyserver, self.options, + str(self.verbose), + str(self.use_agent)))) + + self._batch_dir = os.path.join(self.homedir, 'batch-files') + self._key_dir = os.path.join(self.homedir, 'generated-keys') + + #: The keyring used in the most recently created batch file + self.temp_keyring = None + #: The secring used in the most recently created batch file + self.temp_secring = None + + # Make sure that the 
trustdb exists, or else GnuPG will exit with a + # fatal error (at least it does with GnuPG>=2.0.0): + self.create_trustdb() + + # The --no-use-agent and --use-agent options were deprecated in GnuPG + # 2.x, so we should set use_agent to None here to avoid having + # GPGBase._make_args() add either one. + if self.is_gpg2(): + self.use_agent = None + + @functools.wraps(_trust._create_trustdb) + def create_trustdb(self): + if self.is_gpg2(): + _trust._create_trustdb(self) + else: + log.info("Creating the trustdb is only available with GnuPG>=2.x") + # For backward compatibility with python-gnupg<=1.3.1: + _create_trustdb = create_trustdb + + @functools.wraps(_trust.fix_trustdb) + def fix_trustdb(self, trustdb=None): + if self.is_gpg2(): + _trust.fix_trustdb(self) + else: + log.info("Fixing the trustdb is only available with GnuPG>=2.x") + # For backward compatibility with python-gnupg<=1.3.1: + _fix_trustdb = fix_trustdb + + @functools.wraps(_trust.import_ownertrust) + def import_ownertrust(self, trustdb=None): + if self.is_gpg2(): + _trust.import_ownertrust(self) + else: + log.info("Importing ownertrust is only available with GnuPG>=2.x") + # For backward compatibility with python-gnupg<=1.3.1: + _import_ownertrust = import_ownertrust + + @functools.wraps(_trust.export_ownertrust) + def export_ownertrust(self, trustdb=None): + if self.is_gpg2(): + _trust.export_ownertrust(self) + else: + log.info("Exporting ownertrust is only available with GnuPG>=2.x") + # For backward compatibility with python-gnupg<=1.3.1: + _export_ownertrust = export_ownertrust + + def is_gpg1(self): + """Returns true if using GnuPG <= 1.x.""" + return _util._is_gpg1(self.binary_version) + + def is_gpg2(self): + """Returns true if using GnuPG >= 2.x.""" + return _util._is_gpg2(self.binary_version) + + def sign(self, data, **kwargs): + """Create a signature for a message string or file. + + Note that this method is not for signing other keys. 
(In GnuPG's + terms, what we all usually call 'keysigning' is actually termed + 'certification'...) Even though they are cryptographically the same + operation, GnuPG differentiates between them, presumedly because these + operations are also the same as the decryption operation. If the + ``key_usage``s ``C (certification)``, ``S (sign)``, and ``E + (encrypt)``, were all the same key, the key would "wear down" through + frequent signing usage -- since signing data is usually done often -- + meaning that the secret portion of the keypair, also used for + decryption in this scenario, would have a statistically higher + probability of an adversary obtaining an oracle for it (or for a + portion of the rounds in the cipher algorithm, depending on the family + of cryptanalytic attack used). + + In simpler terms: this function isn't for signing your friends' keys, + it's for something like signing an email. + + :type data: :obj:`str` or :obj:`file` + :param data: A string or file stream to sign. + :param str default_key: The key to sign with. + :param str passphrase: The passphrase to pipe to stdin. + :param bool clearsign: If True, create a cleartext signature. + :param bool detach: If True, create a detached signature. + :param bool binary: If True, do not ascii armour the output. + :param str digest_algo: The hash digest to use. Again, to see which + hashes your GnuPG is capable of using, do: + :command:`$ gpg --with-colons --list-config digestname`. + The default, if unspecified, is ``'SHA512'``. + """ + if 'default_key' in kwargs: + log.info("Signing message '%r' with keyid: %s" + % (data, kwargs['default_key'])) + else: + log.warn("No 'default_key' given! 
Using first key on secring.") + + if hasattr(data, 'read'): + result = self._sign_file(data, **kwargs) + elif not _is_stream(data): + stream = _make_binary_stream(data, self._encoding) + result = self._sign_file(stream, **kwargs) + stream.close() + else: + log.warn("Unable to sign message '%s' with type %s" + % (data, type(data))) + result = None + return result + + def verify(self, data): + """Verify the signature on the contents of the string ``data``. + + >>> gpg = GPG(homedir="doctests") + >>> input = gpg.gen_key_input(Passphrase='foo') + >>> key = gpg.gen_key(input) + >>> assert key + >>> sig = gpg.sign('hello',keyid=key.fingerprint,passphrase='bar') + >>> assert not sig + >>> sig = gpg.sign('hello',keyid=key.fingerprint,passphrase='foo') + >>> assert sig + >>> verify = gpg.verify(sig.data) + >>> assert verify + + """ + f = _make_binary_stream(data, self._encoding) + result = self.verify_file(f) + f.close() + return result + + def verify_file(self, file, sig_file=None): + """Verify the signature on the contents of a file or file-like + object. Can handle embedded signatures as well as detached + signatures. If using detached signatures, the file containing the + detached signature should be specified as the ``sig_file``. + + :param file file: A file descriptor object. + + :param str sig_file: A file containing the GPG signature data for + ``file``. If given, ``file`` is verified via this detached + signature. Its type will be checked with :func:`_util._is_file`. 
+ """ + + result = self._result_map['verify'](self) + + if sig_file is None: + log.debug("verify_file(): Handling embedded signature") + args = ["--verify"] + proc = self._open_subprocess(args) + writer = _util._threaded_copy_data(file, proc.stdin) + self._collect_output(proc, result, writer, stdin=proc.stdin) + else: + if not _util._is_file(sig_file): + log.debug("verify_file(): '%r' is not a file" % sig_file) + return result + log.debug('verify_file(): Handling detached verification') + sig_fh = None + try: + sig_fh = open(sig_file, 'rb') + args = ["--verify %s -" % sig_fh.name] + proc = self._open_subprocess(args) + writer = _util._threaded_copy_data(file, proc.stdin) + self._collect_output(proc, result, writer, stdin=proc.stdin) + finally: + if sig_fh and not sig_fh.closed: + sig_fh.close() + return result + + def import_keys(self, key_data): + """ + Import the key_data into our keyring. + + >>> import shutil + >>> shutil.rmtree("doctests") + >>> gpg = gnupg.GPG(homedir="doctests") + >>> inpt = gpg.gen_key_input() + >>> key1 = gpg.gen_key(inpt) + >>> print1 = str(key1.fingerprint) + >>> pubkey1 = gpg.export_keys(print1) + >>> seckey1 = gpg.export_keys(print1,secret=True) + >>> key2 = gpg.gen_key(inpt) + >>> print2 = key2.fingerprint + >>> seckeys = gpg.list_keys(secret=True) + >>> pubkeys = gpg.list_keys() + >>> assert print1 in seckeys.fingerprints + >>> assert print1 in pubkeys.fingerprints + >>> str(gpg.delete_keys(print1)) + 'Must delete secret key first' + >>> str(gpg.delete_keys(print1,secret=True)) + 'ok' + >>> str(gpg.delete_keys(print1)) + 'ok' + >>> pubkeys = gpg.list_keys() + >>> assert not print1 in pubkeys.fingerprints + >>> result = gpg.import_keys(pubkey1) + >>> pubkeys = gpg.list_keys() + >>> seckeys = gpg.list_keys(secret=True) + >>> assert not print1 in seckeys.fingerprints + >>> assert print1 in pubkeys.fingerprints + >>> result = gpg.import_keys(seckey1) + >>> assert result + >>> seckeys = gpg.list_keys(secret=True) + >>> assert print1 in 
seckeys.fingerprints + """ + ## xxx need way to validate that key_data is actually a valid GPG key + ## it might be possible to use --list-packets and parse the output + + result = self._result_map['import'](self) + log.info('Importing: %r', key_data[:256]) + data = _make_binary_stream(key_data, self._encoding) + self._handle_io(['--import'], data, result, binary=True) + data.close() + return result + + def recv_keys(self, *keyids, **kwargs): + """Import keys from a keyserver. + + >>> gpg = gnupg.GPG(homedir="doctests") + >>> key = gpg.recv_keys('hkp://pgp.mit.edu', '3FF0DB166A7476EA') + >>> assert key + + :param str keyids: Each ``keyids`` argument should be a string + containing a keyid to request. + :param str keyserver: The keyserver to request the ``keyids`` from; + defaults to `gnupg.GPG.keyserver`. + """ + if keyids: + keys = ' '.join([key for key in keyids]) + return self._recv_keys(keys, **kwargs) + else: + log.error("No keyids requested for --recv-keys!") + + def delete_keys(self, fingerprints, secret=False, subkeys=False): + """Delete a key, or list of keys, from the current keyring. + + The keys must be referred to by their full fingerprints for GnuPG to + delete them. If ``secret=True``, the corresponding secret keyring will + be deleted from :obj:`.secring`. + + :type fingerprints: :obj:`str` or :obj:`list` or :obj:`tuple` + :param fingerprints: A string, or a list/tuple of strings, + representing the fingerprint(s) for the key(s) + to delete. + + :param bool secret: If True, delete the corresponding secret key(s) + also. (default: False) + + :param bool subkeys: If True, delete the secret subkey first, then the + public key. (default: False) Same as: + :command:`$gpg --delete-secret-and-public-key 0x12345678`. 
+ """ + which = 'keys' + if secret: + which = 'secret-keys' + if subkeys: + which = 'secret-and-public-keys' + + if _is_list_or_tuple(fingerprints): + fingerprints = ' '.join(fingerprints) + + args = ['--batch'] + args.append("--delete-{0} {1}".format(which, fingerprints)) + + result = self._result_map['delete'](self) + p = self._open_subprocess(args) + self._collect_output(p, result, stdin=p.stdin) + return result + + def export_keys(self, keyids, secret=False, subkeys=False): + """Export the indicated ``keyids``. + + :param str keyids: A keyid or fingerprint in any format that GnuPG will + accept. + :param bool secret: If True, export only the secret key. + :param bool subkeys: If True, export the secret subkeys. + """ + which = '' + if subkeys: + which = '-secret-subkeys' + elif secret: + which = '-secret-keys' + + if _is_list_or_tuple(keyids): + keyids = ' '.join(['%s' % k for k in keyids]) + + args = ["--armor"] + args.append("--export{0} {1}".format(which, keyids)) + + p = self._open_subprocess(args) + ## gpg --export produces no status-fd output; stdout will be empty in + ## case of failure + #stdout, stderr = p.communicate() + result = self._result_map['delete'](self) # any result will do + self._collect_output(p, result, stdin=p.stdin) + log.debug('Exported:%s%r' % (os.linesep, result.data)) + return result.data.decode(self._encoding, self._decode_errors) + + def list_keys(self, secret=False): + """List the keys currently in the keyring. + + The GnuPG option '--show-photos', according to the GnuPG manual, "does + not work with --with-colons", but since we can't rely on all versions + of GnuPG to explicitly handle this correctly, we should probably + include it in the args. 
+ + >>> import shutil + >>> shutil.rmtree("doctests") + >>> gpg = GPG(homedir="doctests") + >>> input = gpg.gen_key_input() + >>> result = gpg.gen_key(input) + >>> print1 = result.fingerprint + >>> result = gpg.gen_key(input) + >>> print2 = result.fingerprint + >>> pubkeys = gpg.list_keys() + >>> assert print1 in pubkeys.fingerprints + >>> assert print2 in pubkeys.fingerprints + """ + + which = 'public-keys' + if secret: + which = 'secret-keys' + args = "--list-%s --fixed-list-mode --fingerprint " % (which,) + args += "--with-colons --list-options no-show-photos" + args = [args] + p = self._open_subprocess(args) + + # there might be some status thingumy here I should handle... (amk) + # ...nope, unless you care about expired sigs or keys (stevegt) + + # Get the response information + result = self._result_map['list'](self) + self._collect_output(p, result, stdin=p.stdin) + lines = result.data.decode(self._encoding, + self._decode_errors).splitlines() + self._parse_keys(result) + return result + + def list_packets(self, raw_data): + """List the packet contents of a file.""" + args = ["--list-packets"] + result = self._result_map['packets'](self) + self._handle_io(args, _make_binary_stream(raw_data, self._encoding), + result) + return result + + def list_sigs(self, *keyids): + """Get the signatures for each of the ``keyids``. + + >>> import gnupg + >>> gpg = gnupg.GPG(homedir="doctests") + >>> key_input = gpg.gen_key_input() + >>> key = gpg.gen_key(key_input) + >>> assert key.fingerprint + + :rtype: dict + :returns: res.sigs is a dictionary whose keys are the uids and whose + values are a set of signature keyids. 
+ """ + if len(keyids) > self._batch_limit: + raise ValueError( + "List signatures is limited to %d keyids simultaneously" + % self._batch_limit) + + args = ["--with-colons", "--fixed-list-mode", "--list-sigs"] + + for key in keyids: + args.append(key) + + proc = self._open_subprocess(args) + result = self._result_map['list'](self) + self._collect_output(proc, result, stdin=proc.stdin) + self._parse_keys(result) + return result + + def _parse_keys(self, result): + lines = result.data.decode(self._encoding, + self._decode_errors).splitlines() + valid_keywords = 'pub uid sec fpr sub sig'.split() + for line in lines: + if self.verbose: + print(line) + log.debug("%r", line.rstrip()) + if not line: + break + L = line.strip().split(':') + if not L: + continue + keyword = L[0] + if keyword in valid_keywords: + getattr(result, keyword)(L) + + def gen_key(self, input): + """Generate a GnuPG key through batch file key generation. See + :meth:`GPG.gen_key_input()` for creating the control input. + + >>> import gnupg + >>> gpg = gnupg.GPG(homedir="doctests") + >>> key_input = gpg.gen_key_input() + >>> key = gpg.gen_key(key_input) + >>> assert key.fingerprint + + :param dict input: A dictionary of parameters and values for the new + key. + :returns: The result mapping with details of the new key, which is a + :class:`GenKey ` object. 
+ """ + args = ["--gen-key --batch"] + key = self._result_map['generate'](self) + f = _make_binary_stream(input, self._encoding) + self._handle_io(args, f, key, binary=True) + f.close() + + fpr = str(key.fingerprint) + if len(fpr) == 20: + for d in map(lambda x: os.path.dirname(x), + [self.temp_keyring, self.temp_secring]): + if not os.path.exists(d): + os.makedirs(d) + + if self.temp_keyring: + if os.path.isfile(self.temp_keyring): + prefix = os.path.join(self.temp_keyring, fpr) + try: os.rename(self.temp_keyring, prefix+".pubring") + except OSError as ose: log.error(str(ose)) + + if self.temp_secring: + if os.path.isfile(self.temp_secring): + prefix = os.path.join(self.temp_secring, fpr) + try: os.rename(self.temp_secring, prefix+".secring") + except OSError as ose: log.error(str(ose)) + + log.info("Key created. Fingerprint: %s" % fpr) + key.keyring = self.temp_keyring + key.secring = self.temp_secring + self.temp_keyring = None + self.temp_secring = None + + return key + + def gen_key_input(self, separate_keyring=False, save_batchfile=False, + testing=False, **kwargs): + """Generate a batch file for input to :meth:`~gnupg.GPG.gen_key`. + + The GnuPG batch file key generation feature allows unattended key + generation by creating a file with special syntax and then providing it + to: :command:`gpg --gen-key --batch`. Batch files look like this: + + | Name-Real: Alice + | Name-Email: alice@inter.net + | Expire-Date: 2014-04-01 + | Key-Type: RSA + | Key-Length: 4096 + | Key-Usage: cert + | Subkey-Type: RSA + | Subkey-Length: 4096 + | Subkey-Usage: encrypt,sign,auth + | Passphrase: sekrit + | %pubring foo.gpg + | %secring sec.gpg + | %commit + + which is what this function creates for you. All of the available, + non-control parameters are detailed below (control parameters are the + ones which begin with a '%'). For example, to generate the batch file + example above, use like this: + + >>> import gnupg + GnuPG logging disabled... 
+ >>> from __future__ import print_function + >>> gpg = gnupg.GPG(homedir='doctests') + >>> alice = { 'name_real': 'Alice', + ... 'name_email': 'alice@inter.net', + ... 'expire_date': '2014-04-01', + ... 'key_type': 'RSA', + ... 'key_length': 4096, + ... 'key_usage': '', + ... 'subkey_type': 'RSA', + ... 'subkey_length': 4096, + ... 'subkey_usage': 'encrypt,sign,auth', + ... 'passphrase': 'sekrit'} + >>> alice_input = gpg.gen_key_input(**alice) + >>> print(alice_input) + Key-Type: RSA + Subkey-Type: RSA + Subkey-Usage: encrypt,sign,auth + Expire-Date: 2014-04-01 + Passphrase: sekrit + Name-Real: Alice + Name-Email: alice@inter.net + Key-Length: 4096 + Subkey-Length: 4096 + %pubring ./doctests/alice.pubring.gpg + %secring ./doctests/alice.secring.gpg + %commit + + >>> alice_key = gpg.gen_key(alice_input) + >>> assert alice_key is not None + >>> assert alice_key.fingerprint is not None + >>> message = "no one else can read my sekrit message" + >>> encrypted = gpg.encrypt(message, alice_key.fingerprint) + >>> assert isinstance(encrypted.data, str) + + :param bool separate_keyring: Specify for the new key to be written to + a separate pubring.gpg and secring.gpg. If True, + :meth:`~gnupg.GPG.gen_key` will automatically rename the separate + keyring and secring to whatever the fingerprint of the generated + key ends up being, suffixed with '.pubring' and '.secring' + respectively. + + :param bool save_batchfile: Save a copy of the generated batch file to + disk in a file named .batch, where is the + ``name_real`` parameter stripped of punctuation, spaces, and + non-ascii characters. + + :param bool testing: Uses a faster, albeit insecure random number + generator to create keys. This should only be used for testing + purposes, for keys which are going to be created and then soon + after destroyed, and never for the generation of actual use keys. + + :param str name_real: The name field of the UID in the generated key. 
+ :param str name_comment: The comment in the UID of the generated key. + + :param str name_email: The email in the UID of the generated key. + (default: ``$USER`` @ :command:`hostname` ) Remember to use UTF-8 + encoding for the entirety of the UID. At least one of + ``name_real``, ``name_comment``, or ``name_email`` must be + provided, or else no user ID is created. + + :param str key_type: One of 'RSA', 'DSA', 'ELG-E', or 'default'. + (default: 'RSA', if using GnuPG v1.x, otherwise 'default') Starts + a new parameter block by giving the type of the primary key. The + algorithm must be capable of signing. This is a required + parameter. The algorithm may either be an OpenPGP algorithm number + or a string with the algorithm name. The special value ‘default’ + may be used for algo to create the default key type; in this case + a ``key_usage`` should not be given and 'default' must also be + used for ``subkey_type``. + + :param int key_length: The requested length of the generated key in + bits. (Default: 4096) + + :param str key_grip: hexstring This is an optional hexidecimal string + which is used to generate a CSR or certificate for an already + existing key. ``key_length`` will be ignored if this parameter + is given. + + :param str key_usage: Space or comma delimited string of key + usages. Allowed values are ‘encrypt’, ‘sign’, and ‘auth’. This is + used to generate the key flags. Please make sure that the + algorithm is capable of this usage. Note that OpenPGP requires + that all primary keys are capable of certification, so no matter + what usage is given here, the ‘cert’ flag will be on. If no + ‘Key-Usage’ is specified and the ‘Key-Type’ is not ‘default’, all + allowed usages for that particular algorithm are used; if it is + not given but ‘default’ is used the usage will be ‘sign’. + + :param str subkey_type: This generates a secondary key + (subkey). Currently only one subkey can be handled. See also + ``key_type`` above. 
+ + :param int subkey_length: The length of the secondary subkey in bits. + + :param str subkey_usage: Key usage for a subkey; similar to + ``key_usage``. + + :type expire_date: :obj:`int` or :obj:`str` + :param expire_date: Can be specified as an iso-date or as + [d|w|m|y] Set the expiration date for the key (and the + subkey). It may either be entered in ISO date format (2000-08-15) + or as number of days, weeks, month or years. The special notation + "seconds=N" is also allowed to directly give an Epoch + value. Without a letter days are assumed. Note that there is no + check done on the overflow of the type used by OpenPGP for + timestamps. Thus you better make sure that the given value make + sense. Although OpenPGP works with time intervals, GnuPG uses an + absolute value internally and thus the last year we can represent + is 2105. + + :param str creation_date: Set the creation date of the key as stored + in the key information and which is also part of the fingerprint + calculation. Either a date like "1986-04-26" or a full timestamp + like "19860426T042640" may be used. The time is considered to be + UTC. If it is not given the current time is used. + + :param str passphrase: The passphrase for the new key. The default is + to not use any passphrase. Note that GnuPG>=2.1.x will not allow + you to specify a passphrase for batch key generation -- GnuPG will + ignore the **passphrase** parameter, stop, and ask the user for + the new passphrase. However, we can put the command + ``%no-protection`` into the batch key generation file to allow a + passwordless key to be created, which can then have its passphrase + set later with ``--edit-key``. + + :param str preferences: Set the cipher, hash, and compression + preference values for this key. This expects the same type of + string as the sub-command ‘setpref’ in the --edit-key menu. + + :param str revoker: Should be given as 'algo:fpr' (case sensitive). + Add a designated revoker to the generated key. 
Algo is the public + key algorithm of the designated revoker (i.e. RSA=1, DSA=17, etc.) + fpr is the fingerprint of the designated revoker. The optional + ‘sensitive’ flag marks the designated revoker as sensitive + information. Only v4 keys may be designated revokers. + + :param str keyserver: This is an optional parameter that specifies the + preferred keyserver URL for the key. + + :param str handle: This is an optional parameter only used with the + status lines ``KEY_CREATED`` and ``KEY_NOT_CREATED``. string may + be up to 100 characters and should not contain spaces. It is + useful for batch key generation to associate a key parameter block + with a status line. + + :rtype: str + :returns: A suitable input string for the :meth:`GPG.gen_key` method, + the latter of which will create the new keypair. + + See `this GnuPG Manual section`__ for more details. + + __ http://www.gnupg.org/documentation/manuals/gnupg-devel/Unattended-GPG-key-generation.html + """ + #: A boolean for determining whether to set subkey_type to 'default' + default_type = False + + parms = {} + + ## if using GnuPG version 1.x, then set the default 'Key-Type' to + ## 'RSA' because it doesn't understand 'default' + parms.setdefault('Key-Type', 'default') + if _util._is_gpg1(self.binary_version): + parms.setdefault('Key-Type', 'RSA') + log.debug("GnuPG v%s detected: setting default key type to %s." + % (self.binary_version, parms['Key-Type'])) + parms.setdefault('Key-Length', 4096) + parms.setdefault('Name-Real', "Autogenerated Key") + parms.setdefault('Expire-Date', _util._next_year()) + + name_email = kwargs.get('name_email') + uidemail = _util.create_uid_email(name_email) + parms.setdefault('Name-Email', uidemail) + + if testing: + ## This specific comment string is required by (some? all?) 
+ ## versions of GnuPG to use the insecure PRNG: + parms.setdefault('Name-Comment', 'insecure!') + + for key, val in list(kwargs.items()): + key = key.replace('_','-').title() + ## to set 'cert', 'Key-Usage' must be blank string + if not key in ('Key-Usage', 'Subkey-Usage'): + if type('')(val).strip(): + parms[key] = val + + ## if Key-Type is 'default', make Subkey-Type also be 'default' + if parms['Key-Type'] == 'default': + default_type = True + for field in ('Key-Usage', 'Subkey-Usage',): + try: parms.pop(field) ## toss these out, handle manually + except KeyError: pass + + ## Key-Type must come first, followed by length + out = "Key-Type: %s\n" % parms.pop('Key-Type') + out += "Key-Length: %d\n" % parms.pop('Key-Length') + if 'Subkey-Type' in parms.keys(): + out += "Subkey-Type: %s\n" % parms.pop('Subkey-Type') + else: + if default_type: + out += "Subkey-Type: default\n" + if 'Subkey-Length' in parms.keys(): + out += "Subkey-Length: %s\n" % parms.pop('Subkey-Length') + + for key, val in list(parms.items()): + out += "%s: %s\n" % (key, val) + + ## There is a problem where, in the batch files, if the '%%pubring' + ## and '%%secring' are given as any static string, i.e. 'pubring.gpg', + ## that file will always get rewritten without confirmation, killing + ## off any keys we had before. So in the case where we wish to + ## generate a bunch of keys and then do stuff with them, we should not + ## give 'pubring.gpg' as our keyring file, otherwise we will lose any + ## keys we had previously. + + if separate_keyring: + ring = str(uidemail + '_' + str(_util._utc_epoch())) + self.temp_keyring = os.path.join(self.homedir, ring+'.pubring') + self.temp_secring = os.path.join(self.homedir, ring+'.secring') + out += "%%pubring %s\n" % self.temp_keyring + out += "%%secring %s\n" % self.temp_secring + + if testing: + ## see TODO file, tag :compatibility:gen_key_input: + ## + ## Add version detection before the '%no-protection' flag. 
+ out += "%no-protection\n" + out += "%transient-key\n" + + out += "%commit\n" + + ## if we've been asked to save a copy of the batch file: + if save_batchfile and parms['Name-Email'] != uidemail: + asc_uid = encodings.normalize_encoding(parms['Name-Email']) + filename = _fix_unsafe(asc_uid) + _util._now() + '.batch' + save_as = os.path.join(self._batch_dir, filename) + readme = os.path.join(self._batch_dir, 'README') + + if not os.path.exists(self._batch_dir): + os.makedirs(self._batch_dir) + + ## the following pulls the link to GnuPG's online batchfile + ## documentation from this function's docstring and sticks it + ## in a README file in the batch directory: + + if getattr(self.gen_key_input, '__doc__', None) is not None: + docs = self.gen_key_input.__doc__ + else: + docs = str() ## docstring=None if run with "python -OO" + links = '\n'.join(x.strip() for x in docs.splitlines()[-2:]) + explain = """ +This directory was created by python-gnupg, on {}, and +it contains saved batch files, which can be given to GnuPG to automatically +generate keys. Please see +{}""".format(_util.now(), links) ## sometimes python is awesome. + + with open(readme, 'a+') as fh: + [fh.write(line) for line in explain] + + with open(save_as, 'a+') as batch_file: + [batch_file.write(line) for line in out] + + return out + + def encrypt(self, data, *recipients, **kwargs): + """Encrypt the message contained in ``data`` to ``recipients``. + + :param str data: The file or bytestream to encrypt. + + :param str recipients: The recipients to encrypt to. Recipients must + be specified keyID/fingerprint. Care should be taken in Python2.x + to make sure that the given fingerprint is in fact a string and + not a unicode object. + + :param str default_key: The keyID/fingerprint of the key to use for + signing. If given, ``data`` will be encrypted and signed. 
+ + :param str passphrase: If given, and ``default_key`` is also given, + use this passphrase to unlock the secret portion of the + ``default_key`` to sign the encrypted ``data``. Otherwise, if + ``default_key`` is not given, but ``symmetric=True``, then use + this passphrase as the passphrase for symmetric + encryption. Signing and symmetric encryption should *not* be + combined when sending the ``data`` to other recipients, else the + passphrase to the secret key would be shared with them. + + :param bool armor: If True, ascii armor the output; otherwise, the + output will be in binary format. (Default: True) + + :param bool encrypt: If True, encrypt the ``data`` using the + ``recipients`` public keys. (Default: True) + + :param bool symmetric: If True, encrypt the ``data`` to ``recipients`` + using a symmetric key. See the ``passphrase`` parameter. Symmetric + encryption and public key encryption can be used simultaneously, + and will result in a ciphertext which is decryptable with either + the symmetric ``passphrase`` or one of the corresponding private + keys. + + :param bool always_trust: If True, ignore trust warnings on recipient + keys. If False, display trust warnings. (default: True) + + :param str output: The output file to write to. If not specified, the + encrypted output is returned, and thus should be stored as an + object in Python. For example: + + >>> import shutil + >>> import gnupg + >>> if os.path.exists("doctests"): + ... shutil.rmtree("doctests") + >>> gpg = gnupg.GPG(homedir="doctests") + >>> key_settings = gpg.gen_key_input(key_type='RSA', + ... key_length=1024, + ... key_usage='ESCA', + ... passphrase='foo') + >>> key = gpg.gen_key(key_settings) + >>> message = "The crow flies at midnight." 
+ >>> encrypted = str(gpg.encrypt(message, key.printprint)) + >>> assert encrypted != message + >>> assert not encrypted.isspace() + >>> decrypted = str(gpg.decrypt(encrypted)) + >>> assert not decrypted.isspace() + >>> decrypted + 'The crow flies at midnight.' + + + :param bool throw_keyids: If True, make all **recipients** keyids be + zero'd out in packet information. This is the same as using + **hidden_recipients** for all **recipients**. (Default: False). + + :param list hidden_recipients: A list of recipients that should have + their keyids zero'd out in packet information. + + :param str cipher_algo: The cipher algorithm to use. To see available + algorithms with your version of GnuPG, do: + :command:`$ gpg --with-colons --list-config ciphername`. + The default ``cipher_algo``, if unspecified, is ``'AES256'``. + + :param str digest_algo: The hash digest to use. Again, to see which + hashes your GnuPG is capable of using, do: + :command:`$ gpg --with-colons --list-config digestname`. + The default, if unspecified, is ``'SHA512'``. + + :param str compress_algo: The compression algorithm to use. Can be one + of ``'ZLIB'``, ``'BZIP2'``, ``'ZIP'``, or ``'Uncompressed'``. + + .. seealso:: :meth:`._encrypt` + """ + if _is_stream(data): + stream = data + else: + stream = _make_binary_stream(data, self._encoding) + result = self._encrypt(stream, recipients, **kwargs) + stream.close() + return result + + def decrypt(self, message, **kwargs): + """Decrypt the contents of a string or file-like object ``message``. + + :type message: file or str or :class:`io.BytesIO` + :param message: A string or file-like object to decrypt. + :param bool always_trust: Instruct GnuPG to ignore trust checks. + :param str passphrase: The passphrase for the secret key used for decryption. + :param str output: A filename to write the decrypted output to. 
+ """ + stream = _make_binary_stream(message, self._encoding) + result = self.decrypt_file(stream, **kwargs) + stream.close() + return result + + def decrypt_file(self, filename, always_trust=False, passphrase=None, + output=None): + """Decrypt the contents of a file-like object ``filename`` . + + :param str filename: A file-like object to decrypt. + :param bool always_trust: Instruct GnuPG to ignore trust checks. + :param str passphrase: The passphrase for the secret key used for decryption. + :param str output: A filename to write the decrypted output to. + """ + args = ["--decrypt"] + if output: # write the output to a file with the specified name + if os.path.exists(output): + os.remove(output) # to avoid overwrite confirmation message + args.append('--output %s' % output) + if always_trust: + args.append("--always-trust") + result = self._result_map['crypt'](self) + self._handle_io(args, filename, result, passphrase, binary=True) + log.debug('decrypt result: %r', result.data) + return result + +class GPGUtilities(object): + """Extra tools for working with GnuPG.""" + + def __init__(self, gpg): + """Initialise extra utility functions.""" + self._gpg = gpg + + def find_key_by_email(self, email, secret=False): + """Find user's key based on their email address. + + :param str email: The email address to search for. + :param bool secret: If True, search through secret keyring. + """ + for key in self.list_keys(secret=secret): + for uid in key['uids']: + if re.search(email, uid): + return key + raise LookupError("GnuPG public key for email %s not found!" % email) + + def find_key_by_subkey(self, subkey): + """Find a key by a fingerprint of one of its subkeys. + + :param str subkey: The fingerprint of the subkey to search for. + """ + for key in self.list_keys(): + for sub in key['subkeys']: + if sub[0] == subkey: + return key + raise LookupError( + "GnuPG public key for subkey %s not found!" 
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""Best-effort module loading helpers.

Used to (re)import optional rule/config modules by name, picking up any
edits made to their source since the previous import.
"""

import importlib

try:
    _reload = importlib.reload      # Python 3.4+
except AttributeError:              # Python 2: reload() is a builtin
    _reload = reload                # noqa: F821


def load(name):
    """Import module ``name`` and reload it; return it, or None on failure.

    The original tried the Python 2 builtin ``reload`` first (always a
    NameError on Python 3) and fell back to ``importlib.__import__``, with
    bare ``except:`` clauses that also swallowed KeyboardInterrupt and
    SystemExit. This version uses ``importlib.import_module`` (which, unlike
    ``__import__``, returns the named module for dotted paths) and catches
    only Exception.

    :param str name: the module name to import.
    :returns: the (re)loaded module, or None if it cannot be imported.
    """
    try:
        module = importlib.import_module(name)
        # Reload so on-disk changes since the last import take effect.
        return _reload(module)
    except Exception:
        # Best-effort by design: a missing or broken module yields None so
        # callers can fall through to the next candidate.
        return None


def loads(namelist):
    """Return the first successfully loaded module from ``namelist``.

    :param namelist: iterable of module names, tried in order.
    :returns: the first module that loads, or None if none do.
    """
    for name in namelist:
        module = load(name)
        if module is not None:
            return module
    return None
512000 +nohup python server.py m>> ssserver.log 2>&1 & + diff --git a/mudb.json b/mudb.json new file mode 100644 index 0000000..0d4f101 --- /dev/null +++ b/mudb.json @@ -0,0 +1,2 @@ +[ +] diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..e2fe400 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,6 @@ +urllib3==1.20 +cymysql==0.8.9 +requests==2.13.0 +pyOpenSSL==16.2.0 +ndg-httpsclient==0.4.2 +pyasn1==0.2.2 diff --git a/run.sh b/run.sh new file mode 100644 index 0000000..89f7b05 --- /dev/null +++ b/run.sh @@ -0,0 +1,6 @@ +#!/bin/bash +cd `dirname $0` +eval $(ps -ef | grep "[0-9] python server\\.py m" | awk '{print "kill "$2}') +ulimit -n 512000 +nohup python server.py m>> /dev/null 2>&1 & + diff --git a/server.py b/server.py new file mode 100644 index 0000000..2ddd3a4 --- /dev/null +++ b/server.py @@ -0,0 +1,91 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright 2015 breakwall +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
class MainThread(threading.Thread):
    """Thread wrapper that runs a worker object's ``thread_db`` loop.

    The wrapped ``obj`` must expose ``thread_db(obj)`` (the blocking worker
    loop) and ``thread_db_stop()`` (signals that loop to exit). Call sites
    in this file pass worker *classes* (e.g. ``db_transfer.DbTransfer``),
    so the object itself is handed back to ``thread_db`` as its argument.
    """

    def __init__(self, obj):
        super(MainThread, self).__init__()
        self.obj = obj

    def run(self):
        # Blocking: returns only when the worker loop exits.
        self.obj.thread_db(self.obj)

    def stop(self):
        # Request (not force) shutdown of the worker loop.
        self.obj.thread_db_stop()
class MainThread(threading.Thread):
    """Background thread that drives the shared event loop.

    ``params`` is the argument tuple forwarded verbatim to
    :meth:`ServerPool._loop` — (loop, dns_resolver, mgr).
    """

    def __init__(self, params):
        super(MainThread, self).__init__()
        self.params = params

    def run(self):
        # Unpack the stored (loop, dns_resolver, mgr) tuple into the
        # pool's blocking loop runner.
        ServerPool._loop(*self.params)
self.stat_counter = {} + + self.uid_port_table = {} + + @staticmethod + def get_instance(): + if ServerPool.instance is None: + ServerPool.instance = ServerPool() + return ServerPool.instance + + def stop(self): + for port in self.eventloop_pool: + self.eventloop_pool[port].stop() + self.loop.stop() + + @staticmethod + def _loop(loop, dns_resolver, mgr): + try: + if mgr is not None: + mgr.add_to_loop(loop) + dns_resolver.add_to_loop(loop) + loop.run() + except (KeyboardInterrupt, IOError, OSError) as e: + logging.error(e) + import traceback + traceback.print_exc() + os.exit(0) + except Exception as e: + logging.error(e) + import traceback + traceback.print_exc() + + def server_is_run(self, port): + port = int(port) + ret = 0 + if port in self.tcp_servers_pool: + ret = 1 + if port in self.tcp_ipv6_servers_pool: + ret |= 2 + return ret + + def server_run_status(self, port): + if 'server' in self.config: + if port not in self.tcp_servers_pool: + return False + if 'server_ipv6' in self.config: + if port not in self.tcp_ipv6_servers_pool: + return False + return True + + def new_server(self, port, user_config): + ret = True + port = int(port) + ipv6_ok = False + + if 'server_ipv6' in self.config: + if port in self.tcp_ipv6_servers_pool: + logging.info( + "server already at %s:%d" % + (self.config['server_ipv6'], port)) + return 'this port server is already running' + else: + a_config = self.config.copy() + a_config.update(user_config) + if len(a_config['server_ipv6']) > 2 and a_config['server_ipv6'][ + 0] == "[" and a_config['server_ipv6'][-1] == "]": + a_config['server_ipv6'] = a_config['server_ipv6'][1:-1] + a_config['server'] = a_config['server_ipv6'] + a_config['server_port'] = port + a_config['max_connect'] = 128 + a_config['method'] = common.to_str(a_config['method']) + try: + logging.info( + "starting server at [%s]:%d" % + (common.to_str( + a_config['server']), port)) + + tcp_server = tcprelay.TCPRelay( + a_config, self.dns_resolver, False, 
stat_counter=self.stat_counter) + tcp_server.add_to_loop(self.loop) + self.tcp_ipv6_servers_pool.update({port: tcp_server}) + + udp_server = udprelay.UDPRelay( + a_config, self.dns_resolver, False, stat_counter=self.stat_counter) + udp_server.add_to_loop(self.loop) + self.udp_ipv6_servers_pool.update({port: udp_server}) + + if common.to_str(a_config['server_ipv6']) == "::": + ipv6_ok = True + except Exception as e: + logging.warn("IPV6 %s " % (e,)) + + if 'server' in self.config: + if port in self.tcp_servers_pool: + logging.info("server already at %s:%d" % + (common.to_str(self.config['server']), port)) + return 'this port server is already running' + else: + a_config = self.config.copy() + a_config.update(user_config) + a_config['server_port'] = port + a_config['max_connect'] = 128 + a_config['method'] = common.to_str(a_config['method']) + try: + logging.info("starting server at %s:%d" % + (common.to_str(a_config['server']), port)) + + tcp_server = tcprelay.TCPRelay( + a_config, self.dns_resolver, False) + tcp_server.add_to_loop(self.loop) + self.tcp_servers_pool.update({port: tcp_server}) + + udp_server = udprelay.UDPRelay( + a_config, self.dns_resolver, False) + udp_server.add_to_loop(self.loop) + self.udp_servers_pool.update({port: udp_server}) + + except Exception as e: + if not ipv6_ok: + logging.warn("IPV4 %s " % (e,)) + + return True + + def del_server(self, port): + port = int(port) + logging.info("del server at %d" % port) + try: + udpsock = socket(AF_INET, SOCK_DGRAM) + udpsock.sendto( + '%s:%s:0:0' % + (get_config().MANAGE_PASS, + port), + (get_config().MANAGE_BIND_IP, + get_config().MANAGE_PORT)) + udpsock.close() + except Exception as e: + logging.warn(e) + return True + + def cb_del_server(self, port): + port = int(port) + + is_not_single = True + if port in self.eventloop_pool: + self.eventloop_pool[port].stop() + is_not_single = False + del self.eventloop_pool[port] + + if port in self.dns_resolver_pool: + del self.dns_resolver_pool[port] + + if 
port not in self.tcp_servers_pool: + logging.info( + "stopped server at %s:%d already stop" % + (self.config['server'], port)) + else: + logging.info( + "stopped server at %s:%d" % + (self.config['server'], port)) + try: + self.tcp_servers_pool[port].close(is_not_single) + del self.tcp_servers_pool[port] + except Exception as e: + logging.warn(e) + try: + self.udp_servers_pool[port].close(is_not_single) + del self.udp_servers_pool[port] + except Exception as e: + logging.warn(e) + + if 'server_ipv6' in self.config: + if port not in self.tcp_ipv6_servers_pool: + logging.info( + "stopped server at [%s]:%d already stop" % + (self.config['server_ipv6'], port)) + else: + logging.info( + "stopped server at [%s]:%d" % + (self.config['server_ipv6'], port)) + try: + self.tcp_ipv6_servers_pool[port].close(is_not_single) + del self.tcp_ipv6_servers_pool[port] + except Exception as e: + logging.warn(e) + try: + self.udp_ipv6_servers_pool[port].close(is_not_single) + del self.udp_ipv6_servers_pool[port] + except Exception as e: + logging.warn(e) + + return True + + def get_server_transfer(self, port): + port = int(port) + ret = [0, 0] + if port in self.tcp_servers_pool: + ret[0] = self.tcp_servers_pool[port].server_transfer_ul + ret[1] = self.tcp_servers_pool[port].server_transfer_dl + if port in self.udp_servers_pool: + ret[0] += self.udp_servers_pool[port].server_transfer_ul + ret[1] += self.udp_servers_pool[port].server_transfer_dl + if port in self.tcp_ipv6_servers_pool: + ret[0] += self.tcp_ipv6_servers_pool[port].server_transfer_ul + ret[1] += self.tcp_ipv6_servers_pool[port].server_transfer_dl + if port in self.udp_ipv6_servers_pool: + ret[0] += self.udp_ipv6_servers_pool[port].server_transfer_ul + ret[1] += self.udp_ipv6_servers_pool[port].server_transfer_dl + return ret + + def get_mu_server_transfer(self, port): + port = int(port) + ret = {} + if port in self.tcp_servers_pool: + tempdict = self.tcp_servers_pool[port].mu_server_transfer_ul + for id in tempdict: + if 
self.uid_port_table[id] not in ret: + ret[self.uid_port_table[id]] = [0, 0] + ret[self.uid_port_table[id]][0] += tempdict[id] + tempdict = self.tcp_servers_pool[port].mu_server_transfer_dl + for id in tempdict: + if self.uid_port_table[id] not in ret: + ret[self.uid_port_table[id]] = [0, 0] + ret[self.uid_port_table[id]][1] += tempdict[id] + if port in self.tcp_ipv6_servers_pool: + tempdict = self.tcp_ipv6_servers_pool[port].mu_server_transfer_ul + for id in tempdict: + if self.uid_port_table[id] not in ret: + ret[self.uid_port_table[id]] = [0, 0] + ret[self.uid_port_table[id]][0] += tempdict[id] + tempdict = self.tcp_ipv6_servers_pool[port].mu_server_transfer_dl + for id in tempdict: + if self.uid_port_table[id] not in ret: + ret[self.uid_port_table[id]] = [0, 0] + ret[self.uid_port_table[id]][1] += tempdict[id] + if port in self.udp_servers_pool: + tempdict = self.udp_servers_pool[port].mu_server_transfer_ul + for id in tempdict: + if self.uid_port_table[id] not in ret: + ret[self.uid_port_table[id]] = [0, 0] + ret[self.uid_port_table[id]][0] += tempdict[id] + tempdict = self.udp_servers_pool[port].mu_server_transfer_dl + for id in tempdict: + if self.uid_port_table[id] not in ret: + ret[self.uid_port_table[id]] = [0, 0] + ret[self.uid_port_table[id]][1] += tempdict[id] + if port in self.udp_ipv6_servers_pool: + tempdict = self.udp_ipv6_servers_pool[port].mu_server_transfer_ul + for id in tempdict: + if self.uid_port_table[id] not in ret: + ret[self.uid_port_table[id]] = [0, 0] + ret[self.uid_port_table[id]][0] += tempdict[id] + tempdict = self.udp_ipv6_servers_pool[port].mu_server_transfer_dl + for id in tempdict: + if self.uid_port_table[id] not in ret: + ret[self.uid_port_table[id]] = [0, 0] + ret[self.uid_port_table[id]][1] += tempdict[id] + return ret + + def get_servers_transfer(self): + servers = self.tcp_servers_pool.copy() + servers.update(self.tcp_ipv6_servers_pool) + servers.update(self.udp_servers_pool) + servers.update(self.udp_ipv6_servers_pool) + 
ret = {} + for port in servers.keys(): + if servers[port]._config["is_multi_user"] == 0: + if port not in ret: + ret[port] = self.get_server_transfer(port) + else: + tempret = self.get_server_transfer(port) + ret[port][0] += tempret[0] + ret[port][1] += tempret[1] + else: + temprets = self.get_mu_server_transfer(port) + for port in temprets: + if port not in ret: + ret[port] = temprets[port][:] + else: + ret[port][0] += temprets[port][0] + ret[port][1] += temprets[port][1] + return ret + + def get_server_iplist(self, port): + port = int(port) + ret = [] + if port in self.tcp_servers_pool: + ret = self.tcp_servers_pool[port].connected_iplist[:] + self.tcp_servers_pool[port].connected_iplist_clean() + if port in self.udp_servers_pool: + templist = self.udp_servers_pool[port].connected_iplist[:] + for ip in templist: + if ip not in ret: + ret.append(ip) + self.udp_servers_pool[port].connected_iplist_clean() + if port in self.tcp_ipv6_servers_pool: + templist = self.tcp_ipv6_servers_pool[port].connected_iplist[:] + for ip in templist: + if ip not in ret: + ret.append(ip) + self.tcp_ipv6_servers_pool[port].connected_iplist_clean() + if port in self.udp_ipv6_servers_pool: + templist = self.udp_ipv6_servers_pool[port].connected_iplist[:] + for ip in templist: + if ip not in ret: + ret.append(ip) + self.udp_ipv6_servers_pool[port].connected_iplist_clean() + return ret + + def get_mu_server_iplist(self, port): + port = int(port) + ret = {} + if port in self.tcp_servers_pool: + tempdict = self.tcp_servers_pool[port].mu_connected_iplist.copy() + for id in tempdict: + if self.uid_port_table[id] not in ret: + ret[self.uid_port_table[id]] = [] + tempret = ret[self.uid_port_table[id]][:] + for ip in tempdict[id]: + tempret.append(ip) + ret[self.uid_port_table[id]] = tempret[:] + self.tcp_servers_pool[port].mu_connected_iplist_clean() + if port in self.tcp_ipv6_servers_pool: + tempdict = self.tcp_ipv6_servers_pool[ + port].mu_connected_iplist.copy() + for id in tempdict: + if 
self.uid_port_table[id] not in ret: + ret[self.uid_port_table[id]] = [] + tempret = ret[self.uid_port_table[id]][:] + for ip in tempdict[id]: + tempret.append(ip) + ret[self.uid_port_table[id]] = tempret[:] + self.tcp_ipv6_servers_pool[port].mu_connected_iplist_clean() + if port in self.udp_servers_pool: + tempdict = self.udp_servers_pool[port].mu_connected_iplist.copy() + for id in tempdict: + if self.uid_port_table[id] not in ret: + ret[self.uid_port_table[id]] = [] + tempret = ret[self.uid_port_table[id]][:] + for ip in tempdict[id]: + tempret.append(ip) + ret[self.uid_port_table[id]] = tempret[:] + self.udp_servers_pool[port].mu_connected_iplist_clean() + if port in self.udp_ipv6_servers_pool: + tempdict = self.udp_ipv6_servers_pool[ + port].mu_connected_iplist.copy() + for id in tempdict: + if self.uid_port_table[id] not in ret: + ret[self.uid_port_table[id]] = [] + tempret = ret[self.uid_port_table[id]][:] + for ip in tempdict[id]: + tempret.append(ip) + ret[self.uid_port_table[id]] = tempret[:] + self.udp_ipv6_servers_pool[port].mu_connected_iplist_clean() + return ret + + def get_servers_iplist(self): + servers = self.tcp_servers_pool.copy() + servers.update(self.tcp_ipv6_servers_pool) + servers.update(self.udp_servers_pool) + servers.update(self.udp_ipv6_servers_pool) + ret = {} + for port in servers.keys(): + if servers[port]._config["is_multi_user"] == 0: + templist = self.get_server_iplist(port) + if templist != []: + if port not in ret: + ret[port] = templist[:] + else: + ret[port] = ret[port] + templist[:] + else: + templist = self.get_mu_server_iplist(port) + for id in templist: + for ip in templist[id]: + if id not in ret: + ret[id] = [] + if ip not in ret[id]: + tempret = ret[id][:] + tempret.append(ip) + ret[id] = tempret[:] + return ret + + def get_servers_detect_log(self): + servers = self.tcp_servers_pool.copy() + servers.update(self.tcp_ipv6_servers_pool) + servers.update(self.udp_servers_pool) + servers.update(self.udp_ipv6_servers_pool) + 
ret = {} + for port in servers.keys(): + if servers[port]._config["is_multi_user"] == 0: + templist = self.get_server_detect_log(port) + if templist != []: + if port not in ret: + ret[port] = templist[:] + else: + ret[port] = ret[port] + templist[:] + else: + templist = self.get_mu_server_detect_log(port) + for id in templist: + for itemid in templist[id]: + if id not in ret: + ret[id] = [] + if itemid not in ret[id]: + tempret = ret[id][:] + tempret.append(itemid) + ret[id] = tempret[:] + + return ret + + def get_server_detect_log(self, port): + port = int(port) + ret = [] + if port in self.tcp_servers_pool: + ret = self.tcp_servers_pool[port].detect_log_list[:] + self.tcp_servers_pool[port].detect_log_list_clean() + if port in self.udp_servers_pool: + templist = self.udp_servers_pool[port].detect_log_list[:] + for id in templist: + if id not in ret: + ret.append(id) + self.udp_servers_pool[port].detect_log_list_clean() + if port in self.tcp_ipv6_servers_pool: + templist = self.tcp_ipv6_servers_pool[port].detect_log_list[:] + for id in templist: + if id not in ret: + ret.append(id) + self.tcp_ipv6_servers_pool[port].detect_log_list_clean() + if port in self.udp_ipv6_servers_pool: + templist = self.udp_ipv6_servers_pool[port].detect_log_list[:] + for id in templist: + if id not in ret: + ret.append(id) + self.udp_ipv6_servers_pool[port].detect_log_list_clean() + return ret + + def get_mu_server_detect_log(self, port): + port = int(port) + ret = {} + if port in self.tcp_servers_pool: + tempdict = self.tcp_servers_pool[port].mu_detect_log_list.copy() + for id in tempdict: + if self.uid_port_table[id] not in ret: + ret[self.uid_port_table[id]] = [] + tempret = ret[self.uid_port_table[id]][:] + for itemid in tempdict[id]: + tempret.append(itemid) + ret[self.uid_port_table[id]] = tempret[:] + self.tcp_servers_pool[port].mu_detect_log_list_clean() + if port in self.tcp_ipv6_servers_pool: + tempdict = self.tcp_ipv6_servers_pool[ + port].mu_detect_log_list.copy() + for id 
in tempdict: + if self.uid_port_table[id] not in ret: + ret[self.uid_port_table[id]] = [] + tempret = ret[self.uid_port_table[id]][:] + for itemid in tempdict[id]: + tempret.append(itemid) + ret[self.uid_port_table[id]] = tempret[:] + self.tcp_ipv6_servers_pool[port].mu_detect_log_list_clean() + if port in self.udp_servers_pool: + tempdict = self.udp_servers_pool[port].mu_detect_log_list.copy() + for id in tempdict: + if self.uid_port_table[id] not in ret: + ret[self.uid_port_table[id]] = [] + tempret = ret[self.uid_port_table[id]][:] + for itemid in tempdict[id]: + tempret.append(itemid) + ret[self.uid_port_table[id]] = tempret[:] + self.udp_servers_pool[port].mu_detect_log_list_clean() + if port in self.udp_ipv6_servers_pool: + tempdict = self.udp_ipv6_servers_pool[ + port].mu_detect_log_list.copy() + for id in tempdict: + if self.uid_port_table[id] not in ret: + ret[self.uid_port_table[id]] = [] + tempret = ret[self.uid_port_table[id]][:] + for itemid in tempdict[id]: + tempret.append(itemid) + ret[self.uid_port_table[id]] = tempret[:] + self.udp_ipv6_servers_pool[port].mu_detect_log_list_clean() + return ret + + def get_server_wrong(self, port): + port = int(port) + ret = [] + if port in self.tcp_servers_pool: + templist = self.tcp_servers_pool[port].wrong_iplist.copy() + for ip in templist: + if ip not in ret and templist[ip] < time.time() - 60: + ret.append(ip) + self.tcp_servers_pool[port].wrong_iplist_clean() + if port in self.udp_servers_pool: + templist = self.udp_servers_pool[port].wrong_iplist.copy() + for ip in templist: + if ip not in ret and templist[ip] < time.time() - 60: + ret.append(ip) + self.udp_servers_pool[port].wrong_iplist_clean() + if port in self.tcp_ipv6_servers_pool: + templist = self.tcp_ipv6_servers_pool[port].wrong_iplist.copy() + for ip in templist: + if ip not in ret and templist[ip] < time.time() - 60: + ret.append(ip) + self.tcp_ipv6_servers_pool[port].wrong_iplist_clean() + if port in self.udp_ipv6_servers_pool: + templist = 
self.udp_ipv6_servers_pool[port].wrong_iplist.copy() + for ip in templist: + if ip not in ret and templist[ip] < time.time() - 60: + ret.append(ip) + self.udp_ipv6_servers_pool[port].wrong_iplist_clean() + return ret + + def get_servers_wrong(self): + servers = self.tcp_servers_pool.copy() + servers.update(self.tcp_ipv6_servers_pool) + servers.update(self.udp_servers_pool) + servers.update(self.udp_ipv6_servers_pool) + ret = {} + for port in servers.keys(): + templist = self.get_server_wrong(port) + if templist != []: + ret[port] = templist[:] + return ret + + def push_uid_port_table(self, table): + self.uid_port_table = table diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..50a3808 --- /dev/null +++ b/setup.py @@ -0,0 +1,39 @@ +import codecs +from setuptools import setup + + +with codecs.open('README.rst', encoding='utf-8') as f: + long_description = f.read() + +setup( + name="shadowsocks", + version="2.6.12", + license='http://www.apache.org/licenses/LICENSE-2.0', + description="A fast tunnel proxy that help you get through firewalls", + author='clowwindy', + author_email='clowwindy42@gmail.com', + url='https://github.com/shadowsocks/shadowsocks', + packages=['shadowsocks', 'shadowsocks.crypto', 'shadowsocks.obfsplugin'], + package_data={ + 'shadowsocks': ['README.rst', 'LICENSE'] + }, + install_requires=[], + entry_points=""" + [console_scripts] + sslocal = shadowsocks.local:main + ssserver = shadowsocks.server:main + """, + classifiers=[ + 'License :: OSI Approved :: Apache Software License', + 'Programming Language :: Python :: 2', + 'Programming Language :: Python :: 2.6', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.3', + 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: Implementation :: CPython', + 'Programming Language :: Python :: Implementation :: PyPy', + 'Topic :: Internet :: Proxy Servers', + ], + 
long_description=long_description, +) diff --git a/shadowsocks.sql b/shadowsocks.sql new file mode 100644 index 0000000..8d84992 --- /dev/null +++ b/shadowsocks.sql @@ -0,0 +1,24 @@ +SET FOREIGN_KEY_CHECKS=0; + +CREATE TABLE `user` ( + `id` int(11) NOT NULL AUTO_INCREMENT, + `email` varchar(32) NOT NULL, + `pass` varchar(16) NOT NULL, + `passwd` varchar(16) NOT NULL, + `t` int(11) NOT NULL DEFAULT '0', + `u` bigint(20) NOT NULL, + `d` bigint(20) NOT NULL, + `transfer_enable` bigint(20) NOT NULL, + `port` int(11) NOT NULL, + `switch` tinyint(4) NOT NULL DEFAULT '1', + `enable` tinyint(4) NOT NULL DEFAULT '1', + `type` tinyint(4) NOT NULL DEFAULT '1', + `last_get_gift_time` int(11) NOT NULL DEFAULT '0', + `last_rest_pass_time` int(11) NOT NULL DEFAULT '0', + PRIMARY KEY (`id`,`port`) +) ENGINE=InnoDB AUTO_INCREMENT=415 DEFAULT CHARSET=utf8; + +-- ---------------------------- +-- Records of user +-- ---------------------------- +INSERT INTO `user` VALUES ('7', 'test@test.com', '123456', '0000000', '1410609560', '0', '0', '9320666234', '50000', '1', '1', '7', '0', '0'); \ No newline at end of file diff --git a/shadowsocks/__init__.py b/shadowsocks/__init__.py new file mode 100644 index 0000000..dc3abd4 --- /dev/null +++ b/shadowsocks/__init__.py @@ -0,0 +1,18 @@ +#!/usr/bin/python +# +# Copyright 2012-2015 clowwindy +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from __future__ import absolute_import, division, print_function, \ + with_statement diff --git a/shadowsocks/asyncdns.py b/shadowsocks/asyncdns.py new file mode 100644 index 0000000..bbe3107 --- /dev/null +++ b/shadowsocks/asyncdns.py @@ -0,0 +1,554 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2014-2015 clowwindy +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from __future__ import absolute_import, division, print_function, \ + with_statement + +import os +import socket +import struct +import re +import logging + +if __name__ == '__main__': + import sys + import inspect + file_path = os.path.dirname(os.path.realpath(inspect.getfile(inspect.currentframe()))) + sys.path.insert(0, os.path.join(file_path, '../')) + +from shadowsocks import common, lru_cache, eventloop, shell + + +CACHE_SWEEP_INTERVAL = 30 + +VALID_HOSTNAME = re.compile(br"(?!-)[A-Z\d_-]{1,63}(? 
def build_request(address, qtype):
    """Serialize a single-question DNS query packet for *address*/*qtype*."""
    # 16-bit random transaction id, then the fixed header:
    # RD=1 (recursion desired), QDCOUNT=1, no answers/authority/additional.
    parts = [
        os.urandom(2),
        struct.pack('!BBHHHH', 1, 0, 1, 0, 0, 0),
        build_address(address),
        struct.pack('!HH', qtype, QCLASS_IN),
    ]
    return b''.join(parts)
def is_valid_hostname(hostname):
    """Return True when *hostname* (bytes) is a syntactically valid DNS name.

    Each dot-separated label must match VALID_HOSTNAME; total length is
    capped at 255 octets; a single trailing dot (absolute name) is allowed.
    """
    if len(hostname) > 255:
        return False
    # BUG FIX: on Python 3 indexing bytes yields an int, so the original
    # ``hostname[-1] == b'.'`` was always False and the trailing dot was
    # never stripped -- valid absolute names were then rejected because the
    # final empty label failed the regex.  Slicing keeps the comparison
    # bytes-to-bytes on both Python 2 and 3 (and no longer raises
    # IndexError on an empty input).
    if hostname[-1:] == b'.':
        hostname = hostname[:-1]
    return all(VALID_HOSTNAME.match(x) for x in hostname.split(b'.'))
f.readlines(): + line = line.strip() + if b"#" in line: + line = line[:line.find(b'#')] + parts = line.split() + if len(parts) >= 2: + ip = parts[0] + if common.is_ip(ip): + for i in range(1, len(parts)): + hostname = parts[i] + if hostname: + self._hosts[hostname] = ip + except IOError: + self._hosts['localhost'] = '127.0.0.1' + + def add_to_loop(self, loop): + if self._loop: + raise Exception('already add to loop') + self._loop = loop + # TODO when dns server is IPv6 + self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, + socket.SOL_UDP) + self._sock.setblocking(False) + loop.add(self._sock, eventloop.POLL_IN, self) + loop.add_periodic(self.handle_periodic) + + def _call_callback(self, hostname, ip, error=None): + callbacks = self._hostname_to_cb.get(hostname, []) + for callback in callbacks: + if callback in self._cb_to_hostname: + del self._cb_to_hostname[callback] + if ip or error: + callback((hostname, ip), error) + else: + callback((hostname, None), + Exception('unable to parse hostname %s' % hostname)) + if hostname in self._hostname_to_cb: + del self._hostname_to_cb[hostname] + if hostname in self._hostname_status: + del self._hostname_status[hostname] + + def _handle_data(self, data): + response = parse_response(data) + if response and response.hostname: + hostname = response.hostname + ip = None + for answer in response.answers: + if answer[1] in (QTYPE_A, QTYPE_AAAA) and \ + answer[2] == QCLASS_IN: + ip = answer[0] + break + if IPV6_CONNECTION_SUPPORT: + if not ip and self._hostname_status.get(hostname, STATUS_IPV4) \ + == STATUS_IPV6: + self._hostname_status[hostname] = STATUS_IPV4 + self._send_req(hostname, QTYPE_A) + else: + if ip: + self._cache[hostname] = ip + self._call_callback(hostname, ip) + elif self._hostname_status.get(hostname, None) == STATUS_IPV4: + for question in response.questions: + if question[1] == QTYPE_A: + self._call_callback(hostname, None) + break + else: + if not ip and self._hostname_status.get(hostname, 
STATUS_IPV6) \ + == STATUS_IPV4: + self._hostname_status[hostname] = STATUS_IPV6 + self._send_req(hostname, QTYPE_AAAA) + else: + if ip: + self._cache[hostname] = ip + self._call_callback(hostname, ip) + elif self._hostname_status.get(hostname, None) == STATUS_IPV6: + for question in response.questions: + if question[1] == QTYPE_AAAA: + self._call_callback(hostname, None) + break + + def handle_event(self, sock, fd, event): + if sock != self._sock: + return + if event & eventloop.POLL_ERR: + logging.error('dns socket err') + self._loop.remove(self._sock) + self._sock.close() + # TODO when dns server is IPv6 + self._sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, + socket.SOL_UDP) + self._sock.setblocking(False) + self._loop.add(self._sock, eventloop.POLL_IN, self) + else: + data, addr = sock.recvfrom(1024) + if addr not in self._servers: + logging.warn('received a packet other than our dns') + return + self._handle_data(data) + + def handle_periodic(self): + self._cache.sweep() + + def remove_callback(self, callback): + hostname = self._cb_to_hostname.get(callback) + if hostname: + del self._cb_to_hostname[callback] + arr = self._hostname_to_cb.get(hostname, None) + if arr: + arr.remove(callback) + if not arr: + del self._hostname_to_cb[hostname] + if hostname in self._hostname_status: + del self._hostname_status[hostname] + + def _send_req(self, hostname, qtype): + req = build_request(hostname, qtype) + for server in self._servers: + logging.debug('resolving %s with type %d using server %s', + hostname, qtype, server) + self._sock.sendto(req, server) + + def resolve(self, hostname, callback): + if type(hostname) != bytes: + hostname = hostname.encode('utf8') + if not hostname: + callback(None, Exception('empty hostname')) + elif common.is_ip(hostname): + callback((hostname, hostname), None) + elif hostname in self._hosts: + logging.debug('hit hosts: %s', hostname) + ip = self._hosts[hostname] + callback((hostname, ip), None) + elif hostname in self._cache: 
+ logging.debug('hit cache: %s', hostname) + ip = self._cache[hostname] + callback((hostname, ip), None) + else: + if not is_valid_hostname(hostname): + callback(None, Exception('invalid hostname: %s' % hostname)) + return + if False: + addrs = socket.getaddrinfo(hostname, 0, 0, + socket.SOCK_DGRAM, socket.SOL_UDP) + if addrs: + af, socktype, proto, canonname, sa = addrs[0] + logging.debug('DNS resolve %s %s' % (hostname, sa[0]) ) + self._cache[hostname] = sa[0] + callback((hostname, sa[0]), None) + return + arr = self._hostname_to_cb.get(hostname, None) + if not arr: + if IPV6_CONNECTION_SUPPORT: + self._hostname_status[hostname] = STATUS_IPV6 + self._send_req(hostname, QTYPE_AAAA) + else: + self._hostname_status[hostname] = STATUS_IPV4 + self._send_req(hostname, QTYPE_A) + self._hostname_to_cb[hostname] = [callback] + self._cb_to_hostname[callback] = hostname + else: + arr.append(callback) + # TODO send again only if waited too long + if IPV6_CONNECTION_SUPPORT: + self._send_req(hostname, QTYPE_AAAA) + else: + self._send_req(hostname, QTYPE_A) + + def close(self): + if self._sock: + if self._loop: + self._loop.remove_periodic(self.handle_periodic) + self._loop.remove(self._sock) + self._sock.close() + self._sock = None + + +def test(): + dns_resolver = DNSResolver() + loop = eventloop.EventLoop() + dns_resolver.add_to_loop(loop) + + global counter + counter = 0 + + def make_callback(): + global counter + + def callback(result, error): + global counter + # TODO: what can we assert? 
+ print(result, error) + counter += 1 + if counter == 9: + dns_resolver.close() + loop.stop() + a_callback = callback + return a_callback + + assert(make_callback() != make_callback()) + + dns_resolver.resolve(b'google.com', make_callback()) + dns_resolver.resolve('google.com', make_callback()) + dns_resolver.resolve('example.com', make_callback()) + dns_resolver.resolve('ipv6.google.com', make_callback()) + dns_resolver.resolve('www.facebook.com', make_callback()) + dns_resolver.resolve('ns2.google.com', make_callback()) + dns_resolver.resolve('invalid.@!#$%^&$@.hostname', make_callback()) + dns_resolver.resolve('toooooooooooooooooooooooooooooooooooooooooooooooooo' + 'ooooooooooooooooooooooooooooooooooooooooooooooooooo' + 'long.hostname', make_callback()) + dns_resolver.resolve('toooooooooooooooooooooooooooooooooooooooooooooooooo' + 'ooooooooooooooooooooooooooooooooooooooooooooooooooo' + 'ooooooooooooooooooooooooooooooooooooooooooooooooooo' + 'ooooooooooooooooooooooooooooooooooooooooooooooooooo' + 'ooooooooooooooooooooooooooooooooooooooooooooooooooo' + 'ooooooooooooooooooooooooooooooooooooooooooooooooooo' + 'long.hostname', make_callback()) + + loop.run() + + +if __name__ == '__main__': + test() diff --git a/shadowsocks/common.py b/shadowsocks/common.py new file mode 100644 index 0000000..3261819 --- /dev/null +++ b/shadowsocks/common.py @@ -0,0 +1,512 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright 2013-2015 clowwindy +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
def random_base64_str(randomlength=8):
    """Return a random string drawn from the uppercase-hex alphabet.

    NOTE: despite the name, the alphabet is 'A'-'F' / '0'-'9' (hex),
    not base64 — the name is kept for backward compatibility.

    :param randomlength: number of characters to generate (default 8)
    :return: str of length ``randomlength``
    """
    # random.choice + join replaces the original loop, which shadowed
    # the builtin `str` and built the result with O(n^2) concatenation.
    chars = 'ABCDEF0123456789'
    return ''.join(random.choice(chars) for _ in range(randomlength))
def sync_str_bytes(obj, target_example):
    """Coerce ``obj`` to the textual type (str vs bytes) of ``target_example``.

    Used where a pattern and a subject may arrive as a mix of str and
    bytes; the result always matches ``target_example``'s type.

    :param obj: str or bytes value to convert
    :param target_example: sample value whose type ``obj`` should match
    :return: ``obj`` converted via to_str/to_bytes as appropriate
    """
    # isinstance() instead of exact type() comparison so str/bytes
    # subclasses are handled; to_str/to_bytes are no-ops when the type
    # already matches, so the "types equal" fast path is preserved.
    if isinstance(target_example, str):
        return to_str(obj)
    if isinstance(target_example, bytes):
        return to_bytes(obj)
    return obj
def pre_parse_header(data):
    """Strip the random-data/probe wrapper some protocols prepend.

    Inspects the first byte and, for the known wrapper markers
    (0x80, 0x81, 0x82, 0x88 / ~0x88), removes the wrapper and returns
    the remaining payload. Any other first byte leaves ``data`` as is.

    :param data: raw first-packet payload (bytes on py3, str on py2)
    :return: unwrapped payload, or None when malformed / too short
    """
    if not data:
        return None
    datatype = ord(data[0])
    if datatype == 0x80:
        # 1-byte type + 1-byte length + <length> random bytes
        if len(data) <= 2:
            return None
        rand_data_size = ord(data[1])
        if rand_data_size + 2 >= len(data):
            # logging.warn is a deprecated alias; use warning()
            logging.warning('header too short, maybe wrong password or '
                            'encryption method')
            return None
        data = data[rand_data_size + 2:]
    elif datatype == 0x81:
        # bare 1-byte marker
        data = data[1:]
    elif datatype == 0x82:
        # 1-byte type + 2-byte big-endian length + <length> random bytes
        if len(data) <= 3:
            return None
        rand_data_size = struct.unpack('>H', data[1:3])[0]
        if rand_data_size + 3 >= len(data):
            logging.warning('header too short, maybe wrong password or '
                            'encryption method')
            return None
        data = data[rand_data_size + 3:]
    elif datatype == 0x88 or (~datatype & 0xff) == 0x88:
        if len(data) <= 7 + 7:
            return None
        data_size = struct.unpack('>H', data[1:3])[0]
        ogn_data = data
        data = data[:data_size]
        # CRC32 over the framed block must leave the magic residue
        crc = binascii.crc32(data) & 0xffffffff
        if crc != 0xffffffff:
            logging.warning('incorrect CRC32, maybe wrong password or '
                            'encryption method')
            return None
        start_pos = 3 + ord(data[3])
        data = data[start_pos:-4]
        if data_size < len(ogn_data):
            data += ogn_data[data_size:]
    return data
class IPNetwork(object):
    """A set of IPv4/IPv6 networks parsed from a comma-separated CIDR list.

    Membership (``addr in network``) tests whether an address falls
    inside any configured network. Networks are stored as
    ``(prefix_value, host_bit_count)`` pairs per address family.
    """

    # bits per address for each family; `False` maps unknown families to 0
    ADDRLENGTH = {socket.AF_INET: 32, socket.AF_INET6: 128, False: 0}

    def __init__(self, addrs):
        self.addrs_str = addrs
        self._network_list_v4 = []
        self._network_list_v6 = []
        if not isinstance(addrs, str):
            addrs = to_str(addrs)
        addrs = addrs.split(',')
        list(map(self.add_network, addrs))

    def add_network(self, addr):
        """Parse one CIDR entry and store its (prefix, host-bits) pair.

        :raises Exception: when the entry is not valid CIDR notation
        """
        # was `addr is ""` — identity comparison with a literal is
        # unreliable (and a SyntaxWarning on Python 3.8+)
        if addr == "":
            return

        # normalise IPv4-mapped IPv6 notation
        addr = addr.replace("::ffff:", "")

        block = addr.split('/')
        addr_family = is_ip(block[0])
        addr_len = IPNetwork.ADDRLENGTH[addr_family]
        if addr_family == socket.AF_INET:
            ip, = struct.unpack("!I", socket.inet_aton(block[0]))
        elif addr_family == socket.AF_INET6:
            hi, lo = struct.unpack("!QQ", inet_pton(addr_family, block[0]))
            ip = (hi << 64) | lo
        else:
            raise Exception("Not a valid CIDR notation: %s" % addr)
        if len(block) == 1:
            # no explicit prefix: infer one from trailing zero bits
            prefix_size = 0
            while (ip & 1) == 0 and ip != 0:
                ip >>= 1
                prefix_size += 1
            logging.warning("You didn't specify CIDR routing prefix size "
                            "for %s, implicit treated as %s/%d"
                            % (addr, addr, addr_len))
        elif block[1].isdigit() and int(block[1]) <= addr_len:
            prefix_size = addr_len - int(block[1])
            ip >>= prefix_size
        else:
            raise Exception("Not a valid CIDR notation: %s" % addr)
        if addr_family == socket.AF_INET:
            self._network_list_v4.append((ip, prefix_size))
        else:
            self._network_list_v6.append((ip, prefix_size))

    def __contains__(self, addr):
        addr = addr.replace("::ffff:", "")

        addr_family = is_ip(addr)
        if addr_family == socket.AF_INET:
            ip, = struct.unpack("!I", socket.inet_aton(addr))
            return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
                           self._network_list_v4))
        elif addr_family == socket.AF_INET6:
            hi, lo = struct.unpack("!QQ", inet_pton(addr_family, addr))
            ip = (hi << 64) | lo
            return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
                           self._network_list_v6))
        else:
            return False

    def __cmp__(self, other):
        # Python 2 only: cmp() does not exist on Python 3
        return cmp(self.addrs_str, other.addrs_str)

    def __eq__(self, other):
        return self.addrs_str == other.addrs_str

    def __ne__(self, other):
        return self.addrs_str != other.addrs_str
def test_inet_conv():
    """Round-trip sanity checks for the inet_pton/inet_ntop pair."""
    v4 = b'8.8.4.4'
    assert inet_ntop(socket.AF_INET, inet_pton(socket.AF_INET, v4)) == v4
    v6 = b'2404:6800:4005:805::1011'
    assert inet_ntop(socket.AF_INET6, inet_pton(socket.AF_INET6, v6)) == v6
def test_match_regex():
    """match_regex must accept str/bytes patterns interchangeably."""
    haystack = b'abc,aaa,aaa,b,aaa.b,a.b'
    # same pattern in bytes and str form, twice each (as the original did)
    for pattern in (br'a\.b', r'a\.b', br'a\.b', r'a\.b'):
        assert match_regex(pattern, haystack)
    assert match_regex(r'\bgoogle\.com\b', b' google.com ')
+# +# AEAD cipher for shadowsocks +# + +from __future__ import absolute_import, division, print_function, \ + with_statement + +from ctypes import c_int, create_string_buffer, byref, c_void_p + +import hashlib +from struct import pack, unpack + +from shadowsocks.crypto import util +from shadowsocks.crypto import hkdf +from shadowsocks.common import ord, chr + + +EVP_CTRL_GCM_SET_IVLEN = 0x9 +EVP_CTRL_GCM_GET_TAG = 0x10 +EVP_CTRL_GCM_SET_TAG = 0x11 +EVP_CTRL_CCM_SET_IVLEN = EVP_CTRL_GCM_SET_IVLEN +EVP_CTRL_CCM_GET_TAG = EVP_CTRL_GCM_GET_TAG +EVP_CTRL_CCM_SET_TAG = EVP_CTRL_GCM_SET_TAG + +EVP_CTRL_AEAD_SET_IVLEN = EVP_CTRL_GCM_SET_IVLEN +EVP_CTRL_AEAD_SET_TAG = EVP_CTRL_GCM_SET_TAG +EVP_CTRL_AEAD_GET_TAG = EVP_CTRL_GCM_GET_TAG + +AEAD_MSG_LEN_UNKNOWN = 0 +AEAD_CHUNK_SIZE_LEN = 2 +AEAD_CHUNK_SIZE_MASK = 0x3FFF + +CIPHER_NONCE_LEN = { + 'aes-128-gcm': 12, + 'aes-192-gcm': 12, + 'aes-256-gcm': 12, + 'aes-128-ocb': 12, # requires openssl 1.1 + 'aes-192-ocb': 12, + 'aes-256-ocb': 12, + 'chacha20-poly1305': 12, + 'chacha20-ietf-poly1305': 12, + 'xchacha20-ietf-poly1305': 24, + 'sodium:aes-256-gcm': 12, +} + +CIPHER_TAG_LEN = { + 'aes-128-gcm': 16, + 'aes-192-gcm': 16, + 'aes-256-gcm': 16, + 'aes-128-ocb': 16, # requires openssl 1.1 + 'aes-192-ocb': 16, + 'aes-256-ocb': 16, + 'chacha20-poly1305': 16, + 'chacha20-ietf-poly1305': 16, + 'xchacha20-ietf-poly1305': 16, + 'sodium:aes-256-gcm': 16, +} + +SUBKEY_INFO = b"ss-subkey" + +libsodium = None +sodium_loaded = False + + +def load_sodium(path=None): + """ + Load libsodium helpers for nonce increment + :return: None + """ + global libsodium, sodium_loaded + + libsodium = util.find_library('sodium', 'sodium_increment', + 'libsodium', path) + if libsodium is None: + print('load libsodium failed with path %s' % path) + return + + if libsodium.sodium_init() < 0: + libsodium = None + print('sodium init failed') + return + + libsodium.sodium_increment.restype = c_void_p + libsodium.sodium_increment.argtypes = ( + c_void_p, c_int + 
def nonce_increment(nonce, nlen):
    """
    Increment an AEAD nonce by one, little endian, in place.

    Pure-Python fallback mirroring libsodium's sodium_increment():
    add 1 at byte 0 and ripple the carry upward through all nlen bytes.

    :param nonce: mutable buffer of single-byte items
    :param nlen: number of bytes in the nonce
    :return: None (the buffer is modified in place)
    """
    carry = 1
    for idx in range(nlen):
        carry += ord(nonce[idx])
        nonce[idx] = chr(carry & 0xFF)
        carry >>= 8
    return
sodium_loaded + if sodium_loaded: + libsodium.sodium_increment(byref(self._nonce), c_int(self._nlen)) + else: + nonce_increment(self._nonce, self._nlen) + # print("".join("%02x" % ord(b) for b in self._nonce)) + + def cipher_ctx_init(self): + """ + Increase nonce to make it unique for the same key + :return: None + """ + self.nonce_increment() + + def aead_encrypt(self, data): + """ + Encrypt data with authenticate tag + + :param data: plain text + :return: str [payload][tag] cipher text with tag + """ + raise Exception("Must implement aead_encrypt method") + + def encrypt_chunk(self, data): + """ + Encrypt a chunk for TCP chunks + + :param data: str + :return: str [len][tag][payload][tag] + """ + plen = len(data) + # l = AEAD_CHUNK_SIZE_LEN + plen + self._tlen * 2 + + # network byte order + ctext = [self.aead_encrypt(pack("!H", plen & AEAD_CHUNK_SIZE_MASK))] + if len(ctext[0]) != AEAD_CHUNK_SIZE_LEN + self._tlen: + self.clean() + raise Exception("size length invalid") + + ctext.append(self.aead_encrypt(data)) + if len(ctext[1]) != plen + self._tlen: + self.clean() + raise Exception("data length invalid") + + return b''.join(ctext) + + def encrypt(self, data): + """ + Encrypt data, for TCP divided into chunks + For UDP data, call aead_encrypt instead + + :param data: str data bytes + :return: str encrypted data + """ + plen = len(data) + if plen <= AEAD_CHUNK_SIZE_MASK: + ctext = self.encrypt_chunk(data) + return ctext + ctext = [] + while plen > 0: + mlen = plen if plen < AEAD_CHUNK_SIZE_MASK \ + else AEAD_CHUNK_SIZE_MASK + c = self.encrypt_chunk(data[:mlen]) + ctext.append(c) + data = data[mlen:] + plen -= mlen + + return b''.join(ctext) + + def aead_decrypt(self, data): + """ + Decrypt data and authenticate tag + + :param data: str [len][tag][payload][tag] cipher text with tag + :return: str plain text + """ + raise Exception("Must implement aead_decrypt method") + + def decrypt_chunk_size(self, data): + """ + Decrypt chunk size + + :param data: str [size][tag] 
encrypted chunk payload len + :return: (int, str) msg length and remaining encrypted data + """ + if self._chunk['mlen'] > 0: + return self._chunk['mlen'], data + data = self._chunk['data'] + data + self._chunk['data'] = b"" + + hlen = AEAD_CHUNK_SIZE_LEN + self._tlen + if hlen > len(data): + self._chunk['data'] = data + return 0, b"" + plen = self.aead_decrypt(data[:hlen]) + plen, = unpack("!H", plen) + if plen & AEAD_CHUNK_SIZE_MASK != plen or plen <= 0: + self.clean() + raise Exception('Invalid message length') + + return plen, data[hlen:] + + def decrypt_chunk_payload(self, plen, data): + """ + Decrypted encrypted msg payload + + :param plen: int payload length + :param data: str [payload][tag][[len][tag]....] encrypted data + :return: (str, str) plain text and remaining encrypted data + """ + data = self._chunk['data'] + data + if len(data) < plen + self._tlen: + self._chunk['mlen'] = plen + self._chunk['data'] = data + return b"", b"" + self._chunk['mlen'] = AEAD_MSG_LEN_UNKNOWN + self._chunk['data'] = b"" + + plaintext = self.aead_decrypt(data[:plen + self._tlen]) + + if len(plaintext) != plen: + self.clean() + raise Exception("plaintext length invalid") + + return plaintext, data[plen + self._tlen:] + + def decrypt_chunk(self, data): + """ + Decrypt a TCP chunk + + :param data: str [len][tag][payload][tag][[len][tag]...] 
encrypted msg + :return: (str, str) decrypted msg and remaining encrypted data + """ + plen, data = self.decrypt_chunk_size(data) + if plen <= 0: + return b"", b"" + return self.decrypt_chunk_payload(plen, data) + + def decrypt(self, data): + """ + Decrypt data for TCP data divided into chunks + For UDP data, call aead_decrypt instead + + :param data: str + :return: str + """ + ptext = [] + pnext, left = self.decrypt_chunk(data) + ptext.append(pnext) + while len(left) > 0: + pnext, left = self.decrypt_chunk(left) + ptext.append(pnext) + return b''.join(ptext) + + +def test_nonce_increment(): + buf = create_string_buffer(12) + print("".join("%02x" % ord(b) for b in buf)) + nonce_increment(buf, 12) + nonce_increment(buf, 12) + nonce_increment(buf, 12) + nonce_increment(buf, 12) + print("".join("%02x" % ord(b) for b in buf)) + for i in range(256): + nonce_increment(buf, 12) + print("".join("%02x" % ord(b) for b in buf)) + + +if __name__ == '__main__': + load_sodium() + test_nonce_increment() diff --git a/shadowsocks/crypto/hkdf.py b/shadowsocks/crypto/hkdf.py new file mode 100644 index 0000000..11998e6 --- /dev/null +++ b/shadowsocks/crypto/hkdf.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Void Copyright NO ONE +# +# Void License +# +# The code belongs to no one. Do whatever you want. +# Forget about boring open source license. +# +# HKDF for AEAD ciphers +# + +from __future__ import division + +import hmac +import hashlib +import sys + +if sys.version_info[0] == 3: + def buffer(x): + return x + + +def hkdf_extract(salt, input_key_material, algorithm=hashlib.sha256): + """ + Extract a pseudorandom key suitable for use with hkdf_expand + from the input_key_material and a salt using HMAC with the + provided hash (default SHA-256). + + salt should be a random, application-specific byte string. If + salt is None or the empty string, an all-zeros string of the same + length as the hash's block size will be used instead per the RFC. 
def hkdf_expand(pseudo_random_key, info=b"", length=32,
                algorithm=hashlib.sha256):
    """
    Expand `pseudo_random_key` and `info` into `length` bytes of output
    key material using HKDF-Expand (RFC 5869) based on HMAC with the
    provided hash (default SHA-256).

    :param pseudo_random_key: PRK from hkdf_extract, at least one hash long
    :param info: optional context/application-specific bytes
    :param length: desired OKM length; must be <= 255 * hash digest size
    :param algorithm: hashlib constructor used for HMAC
    :return: bytes of length `length`
    :raises Exception: when `length` exceeds 255 * digest size
    """
    hash_len = algorithm().digest_size
    length = int(length)
    if length > 255 * hash_len:
        raise Exception("Cannot expand to more than 255 * %d = %d "
                        "bytes using the specified hash function" %
                        (hash_len, 255 * hash_len))
    blocks_needed = length // hash_len \
        + (0 if length % hash_len == 0 else 1)  # ceil
    okm = b""
    output_block = b""
    for counter in range(blocks_needed):
        # T(i) = HMAC(PRK, T(i-1) | info | byte(i)); bytes(bytearray(...))
        # yields a real bytes/str on both py3/py2, removing the module's
        # buffer() shim dependency and the py2 str+bytearray concat error
        output_block = hmac.new(
            pseudo_random_key,
            output_block + info + bytes(bytearray((counter + 1,))),
            algorithm
        ).digest()
        okm += output_block
    return okm[:length]
+ """ + return hkdf_expand(self._prk, info, length, self._hash) diff --git a/shadowsocks/crypto/openssl.py b/shadowsocks/crypto/openssl.py new file mode 100644 index 0000000..929bd6c --- /dev/null +++ b/shadowsocks/crypto/openssl.py @@ -0,0 +1,461 @@ +#!/usr/bin/env python +# +# Copyright 2015 clowwindy +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from __future__ import absolute_import, division, print_function, \ + with_statement + +from ctypes import c_char_p, c_int, c_long, byref,\ + create_string_buffer, c_void_p + +from shadowsocks import common +from shadowsocks.crypto import util +from shadowsocks.crypto.aead import AeadCryptoBase, EVP_CTRL_AEAD_SET_IVLEN, \ + EVP_CTRL_AEAD_GET_TAG, EVP_CTRL_AEAD_SET_TAG + +__all__ = ['ciphers'] + +libcrypto = None +loaded = False +libsodium = None + +buf = None +buf_size = 2048 + +ctx_cleanup = None + +CIPHER_ENC_UNCHANGED = -1 + + +def load_openssl(crypto_path=None): + global loaded, libcrypto, libsodium, buf, ctx_cleanup + + crypto_path = dict(crypto_path) if crypto_path else dict() + path = crypto_path.get('openssl', None) + libcrypto = util.find_library(('crypto', 'eay32'), + 'EVP_get_cipherbyname', + 'libcrypto', path) + if libcrypto is None: + raise Exception('libcrypto(OpenSSL) not found with path %s' % path) + + libcrypto.EVP_get_cipherbyname.restype = c_void_p + libcrypto.EVP_CIPHER_CTX_new.restype = c_void_p + + libcrypto.EVP_CipherInit_ex.argtypes = (c_void_p, c_void_p, c_char_p, + c_char_p, c_char_p, c_int) + 
def load_cipher(cipher_name):
    """Resolve an `EVP_<name>` constructor on libcrypto and invoke it.

    :param cipher_name: bytes cipher name, e.g. b'aes-128-cfb'
    :return: EVP_CIPHER pointer, or None when libcrypto lacks the symbol
    """
    evp_name = b'EVP_' + cipher_name.replace(b'-', b'_')
    if bytes != str:
        # Python 3: getattr needs a text attribute name
        evp_name = str(evp_name, 'utf-8')
    ctor = getattr(libcrypto, evp_name, None)
    if not ctor:
        return None
    ctor.restype = c_void_p
    return ctor()
update(self, data): + """ + Encrypt/decrypt data + :param data: str + :return: str + """ + global buf_size, buf + cipher_out_len = c_long(0) + l = len(data) + if buf_size < l: + buf_size = l * 2 + buf = create_string_buffer(buf_size) + libcrypto.EVP_CipherUpdate( + self._ctx, byref(buf), + byref(cipher_out_len), c_char_p(data), l + ) + # buf is copied to a str object when we access buf.raw + return buf.raw[:cipher_out_len.value] + + def __del__(self): + self.clean() + + def clean(self): + if self._ctx: + ctx_cleanup(self._ctx) + libcrypto.EVP_CIPHER_CTX_free(self._ctx) + self._ctx = None + + +class OpenSSLAeadCrypto(OpenSSLCryptoBase, AeadCryptoBase): + """ + Implement OpenSSL Aead mode: gcm, ocb + """ + def __init__(self, cipher_name, key, iv, op, crypto_path=None): + OpenSSLCryptoBase.__init__(self, cipher_name, crypto_path) + AeadCryptoBase.__init__(self, cipher_name, key, iv, op, crypto_path) + + key_ptr = c_char_p(self._skey) + r = libcrypto.EVP_CipherInit_ex( + self._ctx, + self._cipher, + None, + key_ptr, None, + c_int(op) + ) + if not r: + self.clean() + raise Exception('can not initialize cipher context') + + r = libcrypto.EVP_CIPHER_CTX_ctrl( + self._ctx, + c_int(EVP_CTRL_AEAD_SET_IVLEN), + c_int(self._nlen), + None + ) + if not r: + self.clean() + raise Exception('Set ivlen failed') + + self.cipher_ctx_init() + + def cipher_ctx_init(self): + """ + Need init cipher context after EVP_CipherFinal_ex to reuse context + :return: None + """ + iv_ptr = c_char_p(self._nonce.raw) + r = libcrypto.EVP_CipherInit_ex( + self._ctx, + None, + None, + None, iv_ptr, + c_int(CIPHER_ENC_UNCHANGED) + ) + if not r: + self.clean() + raise Exception('can not initialize cipher context') + + AeadCryptoBase.nonce_increment(self) + + def set_tag(self, tag): + """ + Set tag before decrypt any data (update) + :param tag: authenticated tag + :return: None + """ + tag_len = self._tlen + r = libcrypto.EVP_CIPHER_CTX_ctrl( + self._ctx, + c_int(EVP_CTRL_AEAD_SET_TAG), + c_int(tag_len), 
c_char_p(tag) + ) + if not r: + self.clean() + raise Exception('Set tag failed') + + def get_tag(self): + """ + Get authenticated tag, called after EVP_CipherFinal_ex + :return: str + """ + tag_len = self._tlen + tag_buf = create_string_buffer(tag_len) + r = libcrypto.EVP_CIPHER_CTX_ctrl( + self._ctx, + c_int(EVP_CTRL_AEAD_GET_TAG), + c_int(tag_len), byref(tag_buf) + ) + if not r: + self.clean() + raise Exception('Get tag failed') + return tag_buf.raw[:tag_len] + + def final(self): + """ + Finish encrypt/decrypt a chunk (<= 0x3FFF) + :return: str + """ + global buf_size, buf + cipher_out_len = c_long(0) + r = libcrypto.EVP_CipherFinal_ex( + self._ctx, + byref(buf), byref(cipher_out_len) + ) + if not r: + self.clean() + # print(self._nonce.raw, r, cipher_out_len) + raise Exception('Finalize cipher failed') + return buf.raw[:cipher_out_len.value] + + def aead_encrypt(self, data): + """ + Encrypt data with authenticate tag + + :param data: plain text + :return: cipher text with tag + """ + ctext = self.update(data) + self.final() + self.get_tag() + self.cipher_ctx_init() + return ctext + + def aead_decrypt(self, data): + """ + Decrypt data and authenticate tag + + :param data: cipher text with tag + :return: plain text + """ + clen = len(data) + if clen < self._tlen: + self.clean() + raise Exception('Data too short') + + self.set_tag(data[clen - self._tlen:]) + plaintext = self.update(data[:clen - self._tlen]) + self.final() + self.cipher_ctx_init() + return plaintext + + def encrypt_once(self, data): + return self.aead_encrypt(data) + + def decrypt_once(self, data): + return self.aead_decrypt(data) + + +class OpenSSLStreamCrypto(OpenSSLCryptoBase): + """ + Crypto for stream modes: cfb, ofb, ctr + """ + def __init__(self, cipher_name, key, iv, op, crypto_path=None): + OpenSSLCryptoBase.__init__(self, cipher_name, crypto_path) + key_ptr = c_char_p(key) + iv_ptr = c_char_p(iv) + r = libcrypto.EVP_CipherInit_ex(self._ctx, self._cipher, None, + key_ptr, iv_ptr, c_int(op)) 
+ if not r: + self.clean() + raise Exception('can not initialize cipher context') + + def encrypt(self, data): + return self.update(data) + + def decrypt(self, data): + return self.update(data) + + +ciphers = { + 'aes-128-cbc': (16, 16, OpenSSLStreamCrypto), + 'aes-192-cbc': (24, 16, OpenSSLStreamCrypto), + 'aes-256-cbc': (32, 16, OpenSSLStreamCrypto), + 'aes-128-cfb': (16, 16, OpenSSLStreamCrypto), + 'aes-192-cfb': (24, 16, OpenSSLStreamCrypto), + 'aes-256-cfb': (32, 16, OpenSSLStreamCrypto), + 'aes-128-ofb': (16, 16, OpenSSLStreamCrypto), + 'aes-192-ofb': (24, 16, OpenSSLStreamCrypto), + 'aes-256-ofb': (32, 16, OpenSSLStreamCrypto), + 'aes-128-ctr': (16, 16, OpenSSLStreamCrypto), + 'aes-192-ctr': (24, 16, OpenSSLStreamCrypto), + 'aes-256-ctr': (32, 16, OpenSSLStreamCrypto), + 'aes-128-cfb8': (16, 16, OpenSSLStreamCrypto), + 'aes-192-cfb8': (24, 16, OpenSSLStreamCrypto), + 'aes-256-cfb8': (32, 16, OpenSSLStreamCrypto), + 'aes-128-cfb1': (16, 16, OpenSSLStreamCrypto), + 'aes-192-cfb1': (24, 16, OpenSSLStreamCrypto), + 'aes-256-cfb1': (32, 16, OpenSSLStreamCrypto), + 'bf-cfb': (16, 8, OpenSSLStreamCrypto), + 'camellia-128-cfb': (16, 16, OpenSSLStreamCrypto), + 'camellia-192-cfb': (24, 16, OpenSSLStreamCrypto), + 'camellia-256-cfb': (32, 16, OpenSSLStreamCrypto), + 'cast5-cfb': (16, 8, OpenSSLStreamCrypto), + 'des-cfb': (8, 8, OpenSSLStreamCrypto), + 'idea-cfb': (16, 8, OpenSSLStreamCrypto), + 'rc2-cfb': (16, 8, OpenSSLStreamCrypto), + 'rc4': (16, 0, OpenSSLStreamCrypto), + 'seed-cfb': (16, 16, OpenSSLStreamCrypto), + # AEAD: iv_len = salt_len = key_len + 'aes-128-gcm': (16, 16, OpenSSLAeadCrypto), + 'aes-192-gcm': (24, 24, OpenSSLAeadCrypto), + 'aes-256-gcm': (32, 32, OpenSSLAeadCrypto), + 'aes-128-ocb': (16, 16, OpenSSLAeadCrypto), + 'aes-192-ocb': (24, 24, OpenSSLAeadCrypto), + 'aes-256-ocb': (32, 32, OpenSSLAeadCrypto), +} + + +def run_method(method): + + print(method, ': [stream]', 32) + cipher = OpenSSLStreamCrypto(method, b'k' * 32, b'i' * 16, 1) + decipher = 
OpenSSLStreamCrypto(method, b'k' * 32, b'i' * 16, 0) + + util.run_cipher(cipher, decipher) + + +def run_aead_method(method, key_len=16): + + print(method, ': [payload][tag]', key_len) + cipher = libcrypto.EVP_get_cipherbyname(common.to_bytes(method)) + if not cipher: + cipher = load_cipher(common.to_bytes(method)) + if not cipher: + print('cipher not avaiable, please upgrade openssl') + return + key_len = int(key_len) + cipher = OpenSSLAeadCrypto(method, b'k' * key_len, b'i' * key_len, 1) + decipher = OpenSSLAeadCrypto(method, b'k' * key_len, b'i' * key_len, 0) + + util.run_cipher(cipher, decipher) + + +def run_aead_method_chunk(method, key_len=16): + + print(method, ': chunk([size][tag][payload][tag]', key_len) + cipher = libcrypto.EVP_get_cipherbyname(common.to_bytes(method)) + if not cipher: + cipher = load_cipher(common.to_bytes(method)) + if not cipher: + print('cipher not avaiable, please upgrade openssl') + return + key_len = int(key_len) + cipher = OpenSSLAeadCrypto(method, b'k' * key_len, b'i' * key_len, 1) + decipher = OpenSSLAeadCrypto(method, b'k' * key_len, b'i' * key_len, 0) + + cipher.encrypt_once = cipher.encrypt + decipher.decrypt_once = decipher.decrypt + util.run_cipher(cipher, decipher) + + +def test_aes_gcm(bits=128): + method = "aes-{0}-gcm".format(bits) + run_aead_method(method, bits / 8) + + +def test_aes_ocb(bits=128): + method = "aes-{0}-ocb".format(bits) + run_aead_method(method, bits / 8) + + +def test_aes_gcm_chunk(bits=128): + method = "aes-{0}-gcm".format(bits) + run_aead_method_chunk(method, bits / 8) + + +def test_aes_ocb_chunk(bits=128): + method = "aes-{0}-ocb".format(bits) + run_aead_method_chunk(method, bits / 8) + + +def test_aes_128_cfb(): + run_method('aes-128-cfb') + + +def test_aes_256_cfb(): + run_method('aes-256-cfb') + + +def test_aes_128_cfb8(): + run_method('aes-128-cfb8') + + +def test_aes_256_ofb(): + run_method('aes-256-ofb') + + +def test_aes_256_ctr(): + run_method('aes-256-ctr') + + +def test_bf_cfb(): + 
run_method('bf-cfb') + + +def test_rc4(): + run_method('rc4') + + +if __name__ == '__main__': + test_aes_128_cfb() + test_aes_256_cfb() + test_aes_256_ofb() + test_aes_gcm(128) + test_aes_gcm(192) + test_aes_gcm(256) + test_aes_gcm_chunk(128) + test_aes_gcm_chunk(192) + test_aes_gcm_chunk(256) + test_aes_ocb(128) + test_aes_ocb(192) + test_aes_ocb(256) + test_aes_ocb_chunk(128) + test_aes_ocb_chunk(192) + test_aes_ocb_chunk(256) diff --git a/shadowsocks/crypto/rc4_md5.py b/shadowsocks/crypto/rc4_md5.py new file mode 100644 index 0000000..a495ddb --- /dev/null +++ b/shadowsocks/crypto/rc4_md5.py @@ -0,0 +1,52 @@ +#!/usr/bin/env python +# +# Copyright 2015 clowwindy +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from __future__ import absolute_import, division, print_function, \ + with_statement + +import hashlib +from shadowsocks.crypto import openssl + +__all__ = ['ciphers'] + + +def create_cipher(alg, key, iv, op, crypto_path=None, + key_as_bytes=0, d=None, salt=None, + i=1, padding=1): + md5 = hashlib.md5() + md5.update(key) + md5.update(iv) + rc4_key = md5.digest() + return openssl.OpenSSLStreamCrypto(b'rc4', rc4_key, b'', op, crypto_path) + + +ciphers = { + 'rc4-md5': (16, 16, create_cipher), + 'rc4-md5-6': (16, 6, create_cipher), +} + + +def test(): + from shadowsocks.crypto import util + + cipher = create_cipher('rc4-md5', b'k' * 32, b'i' * 16, 1) + decipher = create_cipher('rc4-md5', b'k' * 32, b'i' * 16, 0) + + util.run_cipher(cipher, decipher) + + +if __name__ == '__main__': + test() diff --git a/shadowsocks/crypto/sodium.py b/shadowsocks/crypto/sodium.py new file mode 100644 index 0000000..cfbcd10 --- /dev/null +++ b/shadowsocks/crypto/sodium.py @@ -0,0 +1,453 @@ +#!/usr/bin/env python +# +# Copyright 2015 clowwindy +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from __future__ import absolute_import, division, print_function, \ + with_statement + +from ctypes import c_char_p, c_int, c_uint, c_ulonglong, byref, \ + create_string_buffer, c_void_p + +from shadowsocks.crypto import util +from shadowsocks.crypto import aead +from shadowsocks.crypto.aead import AeadCryptoBase + +__all__ = ['ciphers'] + +libsodium = None +loaded = False + +buf = None +buf_size = 2048 + +# for salsa20 and chacha20 and chacha20-ietf +BLOCK_SIZE = 64 + + +def load_libsodium(crypto_path=None): + global loaded, libsodium, buf + + crypto_path = dict(crypto_path) if crypto_path else dict() + path = crypto_path.get('sodium', None) + + if not aead.sodium_loaded: + aead.load_sodium(path) + + if aead.sodium_loaded: + libsodium = aead.libsodium + else: + print('load libsodium again with path %s' % path) + libsodium = util.find_library('sodium', 'crypto_stream_salsa20_xor_ic', + 'libsodium', path) + if libsodium is None: + raise Exception('libsodium not found') + + if libsodium.sodium_init() < 0: + raise Exception('libsodium init failed') + + libsodium.crypto_stream_salsa20_xor_ic.restype = c_int + libsodium.crypto_stream_salsa20_xor_ic.argtypes = ( + c_void_p, c_char_p, # cipher output, msg + c_ulonglong, # msg len + c_char_p, c_ulonglong, # nonce, uint64_t initial block counter + c_char_p # key + ) + libsodium.crypto_stream_chacha20_xor_ic.restype = c_int + libsodium.crypto_stream_chacha20_xor_ic.argtypes = ( + c_void_p, c_char_p, + c_ulonglong, + c_char_p, c_ulonglong, + c_char_p + ) + if hasattr(libsodium, 'crypto_stream_xchacha20_xor_ic'): + libsodium.crypto_stream_xchacha20_xor_ic.restype = c_int + libsodium.crypto_stream_xchacha20_xor_ic.argtypes = ( + c_void_p, c_char_p, + c_ulonglong, + c_char_p, c_ulonglong, + c_char_p + ) + libsodium.crypto_stream_chacha20_ietf_xor_ic.restype = c_int + libsodium.crypto_stream_chacha20_ietf_xor_ic.argtypes = ( + c_void_p, c_char_p, + c_ulonglong, + c_char_p, + c_uint, # uint32_t initial counter + c_char_p + ) + 
+ # chacha20-poly1305 + libsodium.crypto_aead_chacha20poly1305_encrypt.restype = c_int + libsodium.crypto_aead_chacha20poly1305_encrypt.argtypes = ( + c_void_p, c_void_p, # c, clen + c_char_p, c_ulonglong, # m, mlen + c_char_p, c_ulonglong, # ad, adlen + c_char_p, # nsec, not used + c_char_p, c_char_p # npub, k + ) + libsodium.crypto_aead_chacha20poly1305_decrypt.restype = c_int + libsodium.crypto_aead_chacha20poly1305_decrypt.argtypes = ( + c_void_p, c_void_p, # m, mlen + c_char_p, # nsec, not used + c_char_p, c_ulonglong, # c, clen + c_char_p, c_ulonglong, # ad, adlen + c_char_p, c_char_p # npub, k + ) + + # chacha20-ietf-poly1305, same api structure as above + libsodium.crypto_aead_chacha20poly1305_ietf_encrypt.restype = c_int + libsodium.crypto_aead_chacha20poly1305_ietf_encrypt.argtypes = ( + c_void_p, c_void_p, + c_char_p, c_ulonglong, + c_char_p, c_ulonglong, + c_char_p, + c_char_p, c_char_p + ) + libsodium.crypto_aead_chacha20poly1305_ietf_decrypt.restype = c_int + libsodium.crypto_aead_chacha20poly1305_ietf_decrypt.argtypes = ( + c_void_p, c_void_p, + c_char_p, + c_char_p, c_ulonglong, + c_char_p, c_ulonglong, + c_char_p, c_char_p + ) + + # xchacha20-ietf-poly1305, same api structure as above + if hasattr(libsodium, 'crypto_aead_xchacha20poly1305_ietf_encrypt'): + libsodium.crypto_aead_xchacha20poly1305_ietf_encrypt.restype = c_int + libsodium.crypto_aead_xchacha20poly1305_ietf_encrypt.argtypes = ( + c_void_p, c_void_p, + c_char_p, c_ulonglong, + c_char_p, c_ulonglong, + c_char_p, + c_char_p, c_char_p + ) + + libsodium.crypto_aead_xchacha20poly1305_ietf_decrypt.restype = c_int + libsodium.crypto_aead_xchacha20poly1305_ietf_decrypt.argtypes = ( + c_void_p, c_void_p, + c_char_p, + c_char_p, c_ulonglong, + c_char_p, c_ulonglong, + c_char_p, c_char_p + ) + + # aes-256-gcm, same api structure as above + libsodium.crypto_aead_aes256gcm_is_available.restype = c_int + + if libsodium.crypto_aead_aes256gcm_is_available(): + 
libsodium.crypto_aead_aes256gcm_encrypt.restype = c_int + libsodium.crypto_aead_aes256gcm_encrypt.argtypes = ( + c_void_p, c_void_p, + c_char_p, c_ulonglong, + c_char_p, c_ulonglong, + c_char_p, + c_char_p, c_char_p + ) + libsodium.crypto_aead_aes256gcm_decrypt.restype = c_int + libsodium.crypto_aead_aes256gcm_decrypt.argtypes = ( + c_void_p, c_void_p, + c_char_p, + c_char_p, c_ulonglong, + c_char_p, c_ulonglong, + c_char_p, c_char_p + ) + + buf = create_string_buffer(buf_size) + loaded = True + + +class SodiumCrypto(object): + def __init__(self, cipher_name, key, iv, op, crypto_path=None): + if not loaded: + load_libsodium(crypto_path) + self.key = key + self.iv = iv + self.key_ptr = c_char_p(key) + self.iv_ptr = c_char_p(iv) + if cipher_name == 'salsa20': + self.cipher = libsodium.crypto_stream_salsa20_xor_ic + elif cipher_name == 'xsalsa20': + self.cipher = libsodium.crypto_stream_xsalsa20_xor_ic + elif cipher_name == 'chacha20': + self.cipher = libsodium.crypto_stream_chacha20_xor_ic + elif cipher_name == 'xchacha20': + if hasattr(libsodium, 'crypto_stream_xchacha20_xor_ic'): + self.cipher = libsodium.crypto_stream_xchacha20_xor_ic + else: + raise Exception('Unsupported cipher') + elif cipher_name == 'chacha20-ietf': + self.cipher = libsodium.crypto_stream_chacha20_ietf_xor_ic + else: + raise Exception('Unknown cipher') + # byte counter, not block counter + self.counter = 0 + + def encrypt_once(self, data): + return self.update(data) + + def decrypt_once(self, data): + return self.update(data) + + def encrypt(self, data): + return self.update(data) + + def decrypt(self, data): + return self.update(data) + + def update(self, data): + global buf_size, buf + l = len(data) + + # we can only prepend some padding to make the encryption align to + # blocks + padding = self.counter % BLOCK_SIZE + if buf_size < padding + l: + buf_size = (padding + l) * 2 + buf = create_string_buffer(buf_size) + + if padding: + data = (b'\0' * padding) + data + self.cipher(byref(buf), 
c_char_p(data), padding + l, + self.iv_ptr, int(self.counter / BLOCK_SIZE), self.key_ptr) + self.counter += l + # buf is copied to a str object when we access buf.raw + # strip off the padding + return buf.raw[padding:padding + l] + + def clean(self): + pass + + +class SodiumAeadCrypto(AeadCryptoBase): + def __init__(self, cipher_name, key, iv, op, crypto_path=None): + if not loaded: + load_libsodium(crypto_path) + AeadCryptoBase.__init__(self, cipher_name, key, iv, op, crypto_path) + + if cipher_name == 'chacha20-poly1305': + self.encryptor = libsodium.crypto_aead_chacha20poly1305_encrypt + self.decryptor = libsodium.crypto_aead_chacha20poly1305_decrypt + elif cipher_name == 'chacha20-ietf-poly1305': + self.encryptor = libsodium. \ + crypto_aead_chacha20poly1305_ietf_encrypt + self.decryptor = libsodium. \ + crypto_aead_chacha20poly1305_ietf_decrypt + elif cipher_name == 'xchacha20-ietf-poly1305': + if hasattr(libsodium, + 'crypto_aead_xchacha20poly1305_ietf_encrypt'): + self.encryptor = libsodium. \ + crypto_aead_xchacha20poly1305_ietf_encrypt + self.decryptor = libsodium. 
\ + crypto_aead_xchacha20poly1305_ietf_decrypt + else: + raise Exception('Unsupported cipher') + elif cipher_name == 'sodium:aes-256-gcm': + if hasattr(libsodium, 'crypto_aead_aes256gcm_encrypt'): + self.encryptor = libsodium.crypto_aead_aes256gcm_encrypt + self.decryptor = libsodium.crypto_aead_aes256gcm_decrypt + else: + raise Exception('Unsupported cipher') + else: + raise Exception('Unknown cipher') + + def cipher_ctx_init(self): + global libsodium + libsodium.sodium_increment(byref(self._nonce), c_int(self._nlen)) + # print("".join("%02x" % ord(b) for b in self._nonce)) + + def aead_encrypt(self, data): + global buf, buf_size + plen = len(data) + if buf_size < plen + self._tlen: + buf_size = (plen + self._tlen) * 2 + buf = create_string_buffer(buf_size) + cipher_out_len = c_ulonglong(0) + self.encryptor( + byref(buf), byref(cipher_out_len), + c_char_p(data), c_ulonglong(plen), + None, c_ulonglong(0), None, + c_char_p(self._nonce.raw), c_char_p(self._skey) + ) + if cipher_out_len.value != plen + self._tlen: + raise Exception("Encrypt failed") + + self.cipher_ctx_init() + return buf.raw[:cipher_out_len.value] + + def aead_decrypt(self, data): + global buf, buf_size + clen = len(data) + if buf_size < clen: + buf_size = clen * 2 + buf = create_string_buffer(buf_size) + cipher_out_len = c_ulonglong(0) + r = self.decryptor( + byref(buf), byref(cipher_out_len), + None, + c_char_p(data), c_ulonglong(clen), + None, c_ulonglong(0), + c_char_p(self._nonce.raw), c_char_p(self._skey) + ) + if r != 0: + raise Exception("Decrypt failed") + + if cipher_out_len.value != clen - self._tlen: + raise Exception("Decrypt failed") + + self.cipher_ctx_init() + return buf.raw[:cipher_out_len.value] + + def encrypt_once(self, data): + return self.aead_encrypt(data) + + def decrypt_once(self, data): + return self.aead_decrypt(data) + + +ciphers = { + 'salsa20': (32, 8, SodiumCrypto), + 'xsalsa20': (32, 24, SodiumCrypto), + 'chacha20': (32, 8, SodiumCrypto), + 'xchacha20': (32, 24, 
SodiumCrypto), + 'chacha20-ietf': (32, 12, SodiumCrypto), + # AEAD: iv_len = salt_len = key_len + 'chacha20-poly1305': (32, 32, SodiumAeadCrypto), + 'chacha20-ietf-poly1305': (32, 32, SodiumAeadCrypto), + 'xchacha20-ietf-poly1305': (32, 32, SodiumAeadCrypto), + 'sodium:aes-256-gcm': (32, 32, SodiumAeadCrypto), +} + + +def test_chacha20(): + print("Test chacha20") + cipher = SodiumCrypto('chacha20', b'k' * 32, b'i' * 16, 1) + decipher = SodiumCrypto('chacha20', b'k' * 32, b'i' * 16, 0) + + util.run_cipher(cipher, decipher) + + +def test_xchacha20(): + print("Test xchacha20") + cipher = SodiumCrypto('xchacha20', b'k' * 32, b'i' * 24, 1) + decipher = SodiumCrypto('xchacha20', b'k' * 32, b'i' * 24, 0) + + util.run_cipher(cipher, decipher) + + +def test_salsa20(): + print("Test salsa20") + cipher = SodiumCrypto('salsa20', b'k' * 32, b'i' * 16, 1) + decipher = SodiumCrypto('salsa20', b'k' * 32, b'i' * 16, 0) + + util.run_cipher(cipher, decipher) + + +def test_xsalsa20(): + cipher = SodiumCrypto('xsalsa20', b'k' * 32, b'i' * 24, 1) + decipher = SodiumCrypto('xsalsa20', b'k' * 32, b'i' * 24, 0) + + util.run_cipher(cipher, decipher) + + +def test_chacha20_ietf(): + print("Test chacha20-ietf") + cipher = SodiumCrypto('chacha20-ietf', b'k' * 32, b'i' * 16, 1) + decipher = SodiumCrypto('chacha20-ietf', b'k' * 32, b'i' * 16, 0) + + util.run_cipher(cipher, decipher) + + +def test_chacha20_poly1305(): + print("Test chacha20-poly1305 [payload][tag]") + cipher = SodiumAeadCrypto('chacha20-poly1305', + b'k' * 32, b'i' * 32, 1) + decipher = SodiumAeadCrypto('chacha20-poly1305', + b'k' * 32, b'i' * 32, 0) + + util.run_cipher(cipher, decipher) + + +def test_chacha20_poly1305_chunk(): + print("Test chacha20-poly1305 chunk [size][tag][payload][tag]") + cipher = SodiumAeadCrypto('chacha20-poly1305', + b'k' * 32, b'i' * 32, 1) + decipher = SodiumAeadCrypto('chacha20-poly1305', + b'k' * 32, b'i' * 32, 0) + + cipher.encrypt_once = cipher.encrypt + decipher.decrypt_once = decipher.decrypt + + 
util.run_cipher(cipher, decipher) + + +def test_chacha20_ietf_poly1305(): + print("Test chacha20-ietf-poly1305 [payload][tag]") + cipher = SodiumAeadCrypto('chacha20-ietf-poly1305', + b'k' * 32, b'i' * 32, 1) + decipher = SodiumAeadCrypto('chacha20-ietf-poly1305', + b'k' * 32, b'i' * 32, 0) + + util.run_cipher(cipher, decipher) + + +def test_chacha20_ietf_poly1305_chunk(): + print("Test chacha20-ietf-poly1305 chunk [size][tag][payload][tag]") + cipher = SodiumAeadCrypto('chacha20-ietf-poly1305', + b'k' * 32, b'i' * 32, 1) + decipher = SodiumAeadCrypto('chacha20-ietf-poly1305', + b'k' * 32, b'i' * 32, 0) + + cipher.encrypt_once = cipher.encrypt + decipher.decrypt_once = decipher.decrypt + + util.run_cipher(cipher, decipher) + + +def test_aes_256_gcm(): + print("Test sodium:aes-256-gcm [payload][tag]") + cipher = SodiumAeadCrypto('sodium:aes-256-gcm', + b'k' * 32, b'i' * 32, 1) + decipher = SodiumAeadCrypto('sodium:aes-256-gcm', + b'k' * 32, b'i' * 32, 0) + + util.run_cipher(cipher, decipher) + + +def test_aes_256_gcm_chunk(): + print("Test sodium:aes-256-gcm chunk [size][tag][payload][tag]") + cipher = SodiumAeadCrypto('sodium:aes-256-gcm', + b'k' * 32, b'i' * 32, 1) + decipher = SodiumAeadCrypto('sodium:aes-256-gcm', + b'k' * 32, b'i' * 32, 0) + + cipher.encrypt_once = cipher.encrypt + decipher.decrypt_once = decipher.decrypt + + util.run_cipher(cipher, decipher) + + +if __name__ == '__main__': + test_chacha20() + test_xchacha20() + test_salsa20() + test_xsalsa20() + test_chacha20_ietf() + test_chacha20_poly1305() + test_chacha20_poly1305_chunk() + test_chacha20_ietf_poly1305() + test_chacha20_ietf_poly1305_chunk() + test_aes_256_gcm() + test_aes_256_gcm_chunk() diff --git a/shadowsocks/crypto/table.py b/shadowsocks/crypto/table.py new file mode 100644 index 0000000..34f390c --- /dev/null +++ b/shadowsocks/crypto/table.py @@ -0,0 +1,189 @@ +# !/usr/bin/env python +# +# Copyright 2015 clowwindy +# +# Licensed under the Apache License, Version 2.0 (the "License"); 
you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from __future__ import absolute_import, division, print_function, \ + with_statement + +import string +import struct +import hashlib + + +__all__ = ['ciphers'] + +cached_tables = {} + +if hasattr(string, 'maketrans'): + maketrans = string.maketrans + translate = string.translate +else: + maketrans = bytes.maketrans + translate = bytes.translate + + +def get_table(key): + m = hashlib.md5() + m.update(key) + s = m.digest() + a, b = struct.unpack(' 0: + return cipher_nme[hyphen:] + return None + + +def run_cipher(cipher, decipher): + from os import urandom + import random + import time + + block_size = 16384 + rounds = 1 * 1024 + plain = urandom(block_size * rounds) + + cipher_results = [] + pos = 0 + print('test start') + start = time.time() + while pos < len(plain): + l = random.randint(100, 32768) + # print(pos, l) + c = cipher.encrypt_once(plain[pos:pos + l]) + cipher_results.append(c) + pos += l + pos = 0 + # c = b''.join(cipher_results) + plain_results = [] + for c in cipher_results: + # l = random.randint(100, 32768) + l = len(c) + plain_results.append(decipher.decrypt_once(c)) + pos += l + end = time.time() + print('speed: %d bytes/s' % (block_size * rounds / (end - start))) + assert b''.join(plain_results) == plain + + +def test_find_library(): + assert find_library('c', 'strcpy', 'libc') is not None + assert find_library(['c'], 'strcpy', 'libc') is not None + assert find_library(('c',), 'strcpy', 'libc') is not None + assert find_library(('crypto', 'eay32'), 'EVP_CipherUpdate', + 
'libcrypto') is not None + assert find_library('notexist', 'strcpy', 'libnotexist') is None + assert find_library('c', 'symbol_not_exist', 'c') is None + assert find_library(('notexist', 'c', 'crypto', 'eay32'), + 'EVP_CipherUpdate', 'libc') is not None + + +if __name__ == '__main__': + test_find_library() diff --git a/shadowsocks/daemon.py b/shadowsocks/daemon.py new file mode 100644 index 0000000..8dc5608 --- /dev/null +++ b/shadowsocks/daemon.py @@ -0,0 +1,208 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright 2014-2015 clowwindy +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from __future__ import absolute_import, division, print_function, \ + with_statement + +import os +import sys +import logging +import signal +import time +from shadowsocks import common, shell + +# this module is ported from ShadowVPN daemon.c + + +def daemon_exec(config): + if 'daemon' in config: + if os.name != 'posix': + raise Exception('daemon mode is only supported on Unix') + command = config['daemon'] + if not command: + command = 'start' + pid_file = config['pid-file'] + log_file = config['log-file'] + if command == 'start': + daemon_start(pid_file, log_file) + elif command == 'stop': + daemon_stop(pid_file) + # always exit after daemon_stop + sys.exit(0) + elif command == 'restart': + daemon_stop(pid_file) + daemon_start(pid_file, log_file) + else: + raise Exception('unsupported daemon command %s' % command) + + +def write_pid_file(pid_file, pid): + import fcntl + import stat + + try: + fd = os.open(pid_file, os.O_RDWR | os.O_CREAT, + stat.S_IRUSR | stat.S_IWUSR) + except OSError as e: + shell.print_exception(e) + return -1 + flags = fcntl.fcntl(fd, fcntl.F_GETFD) + assert flags != -1 + flags |= fcntl.FD_CLOEXEC + r = fcntl.fcntl(fd, fcntl.F_SETFD, flags) + assert r != -1 + # There is no platform independent way to implement fcntl(fd, F_SETLK, &fl) + # via fcntl.fcntl. 
So use lockf instead + try: + fcntl.lockf(fd, fcntl.LOCK_EX | fcntl.LOCK_NB, 0, 0, os.SEEK_SET) + except IOError: + r = os.read(fd, 32) + if r: + logging.error('already started at pid %s' % common.to_str(r)) + else: + logging.error('already started') + os.close(fd) + return -1 + os.ftruncate(fd, 0) + os.write(fd, common.to_bytes(str(pid))) + return 0 + + +def freopen(f, mode, stream): + oldf = open(f, mode) + oldfd = oldf.fileno() + newfd = stream.fileno() + os.close(newfd) + os.dup2(oldfd, newfd) + + +def daemon_start(pid_file, log_file): + + def handle_exit(signum, _): + if signum == signal.SIGTERM: + sys.exit(0) + sys.exit(1) + + signal.signal(signal.SIGINT, handle_exit) + signal.signal(signal.SIGTERM, handle_exit) + + # fork only once because we are sure parent will exit + pid = os.fork() + assert pid != -1 + + if pid > 0: + # parent waits for its child + time.sleep(5) + sys.exit(0) + + # child signals its parent to exit + ppid = os.getppid() + pid = os.getpid() + if write_pid_file(pid_file, pid) != 0: + os.kill(ppid, signal.SIGINT) + sys.exit(1) + + os.setsid() + signal.signal(signal.SIG_IGN, signal.SIGHUP) + + print('started') + os.kill(ppid, signal.SIGTERM) + + sys.stdin.close() + try: + freopen(log_file, 'a', sys.stdout) + freopen(log_file, 'a', sys.stderr) + except IOError as e: + shell.print_exception(e) + sys.exit(1) + + +def daemon_stop(pid_file): + import errno + try: + with open(pid_file) as f: + buf = f.read() + pid = common.to_str(buf) + if not buf: + logging.error('not running') + except IOError as e: + shell.print_exception(e) + if e.errno == errno.ENOENT: + # always exit 0 if we are sure daemon is not running + logging.error('not running') + return + sys.exit(1) + pid = int(pid) + if pid > 0: + try: + os.kill(pid, signal.SIGTERM) + except OSError as e: + if e.errno == errno.ESRCH: + logging.error('not running') + # always exit 0 if we are sure daemon is not running + return + shell.print_exception(e) + sys.exit(1) + else: + logging.error('pid is 
not positive: %d', pid) + + # sleep for maximum 10s + for i in range(0, 200): + try: + # query for the pid + os.kill(pid, 0) + except OSError as e: + if e.errno == errno.ESRCH: + break + time.sleep(0.05) + else: + logging.error('timed out when stopping pid %d', pid) + sys.exit(1) + print('stopped') + os.unlink(pid_file) + + +def set_user(username): + if username is None: + return + + import pwd + import grp + + try: + pwrec = pwd.getpwnam(username) + except KeyError: + logging.error('user not found: %s' % username) + raise + user = pwrec[0] + uid = pwrec[2] + gid = pwrec[3] + + cur_uid = os.getuid() + if uid == cur_uid: + return + if cur_uid != 0: + logging.error('can not set user as nonroot user') + # will raise later + + # inspired by supervisor + if hasattr(os, 'setgroups'): + groups = [grprec[2] for grprec in grp.getgrall() if user in grprec[3]] + groups.insert(0, gid) + os.setgroups(groups) + os.setgid(gid) + os.setuid(uid) diff --git a/shadowsocks/encrypt.py b/shadowsocks/encrypt.py new file mode 100644 index 0000000..8c114fd --- /dev/null +++ b/shadowsocks/encrypt.py @@ -0,0 +1,275 @@ +#!/usr/bin/env python +# +# Copyright 2012-2015 clowwindy +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from __future__ import absolute_import, division, print_function, \ + with_statement + +import os +import sys +import hashlib +import logging + +from shadowsocks import common +from shadowsocks.crypto import rc4_md5, openssl, sodium, table + + +CIPHER_ENC_ENCRYPTION = 1 +CIPHER_ENC_DECRYPTION = 0 + +METHOD_INFO_KEY_LEN = 0 +METHOD_INFO_IV_LEN = 1 +METHOD_INFO_CRYPTO = 2 + +method_supported = {} +method_supported.update(rc4_md5.ciphers) +method_supported.update(openssl.ciphers) +method_supported.update(sodium.ciphers) +method_supported.update(table.ciphers) + + +def random_string(length): + return os.urandom(length) + +cached_keys = {} + + +def try_cipher(key, method=None, crypto_path=None): + Encryptor(key, method, crypto_path) + + +def EVP_BytesToKey(password, key_len, iv_len): + # equivalent to OpenSSL's EVP_BytesToKey() with count 1 + # so that we make the same key and iv as nodejs version + cached_key = '%s-%d-%d' % (password, key_len, iv_len) + r = cached_keys.get(cached_key, None) + if r: + return r + m = [] + i = 0 + while len(b''.join(m)) < (key_len + iv_len): + md5 = hashlib.md5() + data = password + if i > 0: + data = m[i - 1] + password + md5.update(data) + m.append(md5.digest()) + i += 1 + ms = b''.join(m) + key = ms[:key_len] + iv = ms[key_len:key_len + iv_len] + cached_keys[cached_key] = (key, iv) + return key, iv + + +class Encryptor(object): + def __init__(self, password, method, crypto_path=None, iv=None): + """ + Crypto wrapper + :param password: str cipher password + :param method: str cipher + :param crypto_path: dict or none + {'openssl': path, 'sodium': path, 'mbedtls': path} + """ + self.password = password + self.key = None + self.method = method + self.iv_sent = False + self.cipher_iv = b'' + self.decipher = None + self.decipher_iv = None + self.crypto_path = crypto_path + method = method.lower() + self._method_info = Encryptor.get_method_info(method) + if self._method_info: + if iv is None or len(iv) != self._method_info[1]: + 
self.cipher = self.get_cipher( + password, method, CIPHER_ENC_ENCRYPTION, + random_string(self._method_info[METHOD_INFO_IV_LEN]) + ) + else: + self.cipher = self.get_cipher(key, method, 1, iv) + else: + logging.error('method %s not supported' % method) + # sys.exit(1) + raise Exception('method not supported') + + @staticmethod + def get_method_info(method): + method = method.lower() + m = method_supported.get(method) + return m + + def iv_len(self): + return len(self.cipher_iv) + + def get_cipher(self, password, method, op, iv): + password = common.to_bytes(password) + m = self._method_info + if m[METHOD_INFO_KEY_LEN] > 0: + key, _ = EVP_BytesToKey(password, + m[METHOD_INFO_KEY_LEN], + m[METHOD_INFO_IV_LEN]) + else: + # key_length == 0 indicates we should use the key directly + key, iv = password, b'' + self.key = key + iv = iv[:m[METHOD_INFO_IV_LEN]] + if op == CIPHER_ENC_ENCRYPTION: + # this iv is for cipher not decipher + self.cipher_iv = iv + return m[METHOD_INFO_CRYPTO](method, key, iv, op, self.crypto_path) + + def encrypt(self, buf): + if len(buf) == 0: + return buf + if self.iv_sent: + return self.cipher.encrypt(buf) + else: + self.iv_sent = True + return self.cipher_iv + self.cipher.encrypt(buf) + + def decrypt(self, buf): + if len(buf) == 0: + return buf + if self.decipher is None: + decipher_iv_len = self._method_info[METHOD_INFO_IV_LEN] + decipher_iv = buf[:decipher_iv_len] + self.decipher_iv = decipher_iv + self.decipher = self.get_cipher( + self.password, self.method, + CIPHER_ENC_DECRYPTION, + decipher_iv + ) + buf = buf[decipher_iv_len:] + if len(buf) == 0: + return buf + return self.decipher.decrypt(buf) + + +def gen_key_iv(password, method): + method = method.lower() + if method not in method_supported: + raise Exception('method not supported') + (key_len, iv_len, m) = method_supported[method] + if key_len > 0: + key, _ = EVP_BytesToKey(password, key_len, iv_len) + else: + key = password + iv = random_string(iv_len) + + return key, iv, m + + +def 
encrypt_all_m(key, iv, m, method, data, crypto_path=None): + result = [iv] + cipher = m(method, key, iv, 1, crypto_path) + result.append(cipher.encrypt_once(data)) + return b''.join(result) + + +def decrypt_all(password, method, data, crypto_path=None): + result = [] + method = method.lower() + (key, iv, m) = gen_key_iv(password, method) + iv = data[:len(iv)] + data = data[len(iv):] + cipher = m(method, key, iv, CIPHER_ENC_DECRYPTION, crypto_path) + result.append(cipher.decrypt_once(data)) + return b''.join(result), key, iv + + +def encrypt_all(password, method, data, crypto_path=None): + result = [] + method = method.lower() + (key, iv, m) = gen_key_iv(password, method) + result.append(iv) + cipher = m(method, key, iv, CIPHER_ENC_ENCRYPTION, crypto_path) + result.append(cipher.encrypt_once(data)) + return b''.join(result) + +def encrypt_key(password, method): + method = method.lower() + if method not in method_supported: + raise Exception('method not supported') + (key_len, iv_len, m) = method_supported[method] + if key_len > 0: + key, _ = EVP_BytesToKey(password, key_len, iv_len) + else: + key = password + return key + +def encrypt_iv_len(method): + method = method.lower() + if method not in method_supported: + raise Exception('method not supported') + (key_len, iv_len, m) = method_supported[method] + return iv_len + +def encrypt_new_iv(method): + method = method.lower() + if method not in method_supported: + raise Exception('method not supported') + (key_len, iv_len, m) = method_supported[method] + return random_string(iv_len) + +CIPHERS_TO_TEST = [ + 'aes-128-cfb', + 'aes-256-cfb', + 'aes-256-gcm', + 'rc4-md5', + 'salsa20', + 'chacha20', + 'table', +] + + +def test_encryptor(): + from os import urandom + plain = urandom(10240) + for method in CIPHERS_TO_TEST: + logging.warn(method) + encryptor = Encryptor(b'key', method) + decryptor = Encryptor(b'key', method) + cipher = encryptor.encrypt(plain) + plain2 = decryptor.decrypt(cipher) + assert plain == plain2 + + 
+def test_encrypt_all(): + from os import urandom + plain = urandom(10240) + for method in CIPHERS_TO_TEST: + logging.warn(method) + cipher = encrypt_all(b'key', method, plain) + plain2, key, iv = decrypt_all(b'key', method, cipher) + assert plain == plain2 + + +def test_encrypt_all_m(): + from os import urandom + plain = urandom(10240) + for method in CIPHERS_TO_TEST: + logging.warn(method) + key, iv, m = gen_key_iv(b'key', method) + cipher = encrypt_all_m(key, iv, m, method, plain) + plain2, key, iv = decrypt_all(b'key', method, cipher) + assert plain == plain2 + + +if __name__ == '__main__': + test_encrypt_all() + test_encryptor() + test_encrypt_all_m() diff --git a/shadowsocks/encrypt_test.py b/shadowsocks/encrypt_test.py new file mode 100644 index 0000000..3a974ac --- /dev/null +++ b/shadowsocks/encrypt_test.py @@ -0,0 +1,53 @@ +from __future__ import absolute_import, division, print_function, \ + with_statement + +import sys +import os + +sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../')) + + +from shadowsocks.crypto import rc4_md5 +from shadowsocks.crypto import openssl +from shadowsocks.crypto import sodium +from shadowsocks.crypto import table + + +def run(func): + try: + func() + except: + pass + + +def run_n(func, name): + try: + func(name) + except: + pass + + +def main(): + print("\n""rc4_md5") + rc4_md5.test() + print("\n""aes-256-cfb") + openssl.test_aes_256_cfb() + print("\n""aes-128-cfb") + openssl.test_aes_128_cfb() + print("\n""bf-cfb") + run(openssl.test_bf_cfb) + print("\n""camellia-128-cfb") + run_n(openssl.run_method, "camellia-128-cfb") + print("\n""cast5-cfb") + run_n(openssl.run_method, "cast5-cfb") + print("\n""idea-cfb") + run_n(openssl.run_method, "idea-cfb") + print("\n""seed-cfb") + run_n(openssl.run_method, "seed-cfb") + print("\n""salsa20") + run(sodium.test_salsa20) + print("\n""chacha20") + run(sodium.test_chacha20) + +if __name__ == '__main__': + main() diff --git a/shadowsocks/eventloop.py 
b/shadowsocks/eventloop.py new file mode 100644 index 0000000..467f5b8 --- /dev/null +++ b/shadowsocks/eventloop.py @@ -0,0 +1,263 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright 2013-2015 clowwindy +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# from ssloop +# https://github.com/clowwindy/ssloop + +from __future__ import absolute_import, division, print_function, \ + with_statement + +import os +import time +import socket +import select +import errno +import logging +from collections import defaultdict + +from shadowsocks import shell + + +__all__ = ['EventLoop', 'POLL_NULL', 'POLL_IN', 'POLL_OUT', 'POLL_ERR', + 'POLL_HUP', 'POLL_NVAL', 'EVENT_NAMES'] + +POLL_NULL = 0x00 +POLL_IN = 0x01 +POLL_OUT = 0x04 +POLL_ERR = 0x08 +POLL_HUP = 0x10 +POLL_NVAL = 0x20 + + +EVENT_NAMES = { + POLL_NULL: 'POLL_NULL', + POLL_IN: 'POLL_IN', + POLL_OUT: 'POLL_OUT', + POLL_ERR: 'POLL_ERR', + POLL_HUP: 'POLL_HUP', + POLL_NVAL: 'POLL_NVAL', +} + +# we check timeouts every TIMEOUT_PRECISION seconds +TIMEOUT_PRECISION = 2 + + +class KqueueLoop(object): + + MAX_EVENTS = 1024 + + def __init__(self): + self._kqueue = select.kqueue() + self._fds = {} + + def _control(self, fd, mode, flags): + events = [] + if mode & POLL_IN: + events.append(select.kevent(fd, select.KQ_FILTER_READ, flags)) + if mode & POLL_OUT: + events.append(select.kevent(fd, select.KQ_FILTER_WRITE, flags)) + for e in events: + self._kqueue.control([e], 0) + + def poll(self, timeout): + if timeout < 0: + 
timeout = None # kqueue behaviour + events = self._kqueue.control(None, KqueueLoop.MAX_EVENTS, timeout) + results = defaultdict(lambda: POLL_NULL) + for e in events: + fd = e.ident + if e.filter == select.KQ_FILTER_READ: + results[fd] |= POLL_IN + elif e.filter == select.KQ_FILTER_WRITE: + results[fd] |= POLL_OUT + return results.items() + + def register(self, fd, mode): + self._fds[fd] = mode + self._control(fd, mode, select.KQ_EV_ADD) + + def unregister(self, fd): + if fd in self._fds: + self._control(fd, self._fds[fd], select.KQ_EV_DELETE) + del self._fds[fd] + + def modify(self, fd, mode): + self.unregister(fd) + self.register(fd, mode) + + def close(self): + self._kqueue.close() + + +class SelectLoop(object): + + def __init__(self): + self._r_list = set() + self._w_list = set() + self._x_list = set() + + def poll(self, timeout): + r, w, x = select.select(self._r_list, self._w_list, self._x_list, + timeout) + results = defaultdict(lambda: POLL_NULL) + for p in [(r, POLL_IN), (w, POLL_OUT), (x, POLL_ERR)]: + for fd in p[0]: + results[fd] |= p[1] + return results.items() + + def register(self, fd, mode): + if mode & POLL_IN: + self._r_list.add(fd) + if mode & POLL_OUT: + self._w_list.add(fd) + if mode & POLL_ERR: + self._x_list.add(fd) + + def unregister(self, fd): + if fd in self._r_list: + self._r_list.remove(fd) + if fd in self._w_list: + self._w_list.remove(fd) + if fd in self._x_list: + self._x_list.remove(fd) + + def modify(self, fd, mode): + self.unregister(fd) + self.register(fd, mode) + + def close(self): + pass + + +class EventLoop(object): + + def __init__(self): + if hasattr(select, 'epoll'): + self._impl = select.epoll() + model = 'epoll' + elif hasattr(select, 'kqueue'): + self._impl = KqueueLoop() + model = 'kqueue' + elif hasattr(select, 'select'): + self._impl = SelectLoop() + model = 'select' + else: + raise Exception('can not find any available functions in select ' + 'package') + self._fdmap = {} # (f, handler) + self._last_time = time.time() 
+ self._periodic_callbacks = [] + self._stopping = False + logging.debug('using event model: %s', model) + + def poll(self, timeout=None): + events = self._impl.poll(timeout) + return [(self._fdmap[fd][0], fd, event) for fd, event in events] + + def add(self, f, mode, handler): + fd = f.fileno() + self._fdmap[fd] = (f, handler) + self._impl.register(fd, mode) + + def remove(self, f): + fd = f.fileno() + if fd in self._fdmap: + del self._fdmap[fd] + self._impl.unregister(fd) + + def removefd(self, fd): + if fd in self._fdmap: + del self._fdmap[fd] + self._impl.unregister(fd) + + def add_periodic(self, callback): + self._periodic_callbacks.append(callback) + + def remove_periodic(self, callback): + if callback in self._periodic_callbacks: + self._periodic_callbacks.remove(callback) + + def modify(self, f, mode): + fd = f.fileno() + self._impl.modify(fd, mode) + + def stop(self): + self._stopping = True + + def run(self): + events = [] + while not self._stopping: + asap = False + try: + events = self.poll(TIMEOUT_PRECISION) + except (OSError, IOError) as e: + if errno_from_exception(e) in (errno.EPIPE, errno.EINTR): + # EPIPE: Happens when the client closes the connection + # EINTR: Happens when received a signal + # handles them as soon as possible + asap = True + logging.debug('poll:%s', e) + else: + logging.error('poll:%s', e) + import traceback + traceback.print_exc() + continue + + handle = False + for sock, fd, event in events: + handler = self._fdmap.get(fd, None) + if handler is not None: + handler = handler[1] + try: + handle = handler.handle_event(sock, fd, event) or handle + except (OSError, IOError) as e: + shell.print_exception(e) + now = time.time() + if asap or now - self._last_time >= TIMEOUT_PRECISION: + for callback in self._periodic_callbacks: + callback() + self._last_time = now + if events and not handle: + time.sleep(0.001) + + def __del__(self): + self._impl.close() + + +# from tornado +def errno_from_exception(e): + """Provides the errno from 
an Exception object. + + There are cases that the errno attribute was not set so we pull + the errno out of the args but if someone instatiates an Exception + without any args you will get a tuple error. So this function + abstracts all that behavior to give you a safe way to get the + errno. + """ + + if hasattr(e, 'errno'): + return e.errno + elif e.args: + return e.args[0] + else: + return None + + +# from tornado +def get_sock_error(sock): + error_number = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) + return socket.error(error_number, os.strerror(error_number)) diff --git a/shadowsocks/local.py b/shadowsocks/local.py new file mode 100644 index 0000000..5c2f30e --- /dev/null +++ b/shadowsocks/local.py @@ -0,0 +1,89 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# +# Copyright 2012-2015 clowwindy +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from __future__ import absolute_import, division, print_function, \ + with_statement + +import sys +import os +import logging +import signal + +if __name__ == '__main__': + import inspect + file_path = os.path.dirname( + os.path.realpath( + inspect.getfile( + inspect.currentframe()))) + sys.path.insert(0, os.path.join(file_path, '../')) + +from shadowsocks import shell, daemon, eventloop, tcprelay, udprelay, asyncdns + + +def main(): + shell.check_python() + + # fix py2exe + if hasattr(sys, "frozen") and sys.frozen in \ + ("windows_exe", "console_exe"): + p = os.path.dirname(os.path.abspath(sys.executable)) + os.chdir(p) + + config = shell.get_config(True) + + if not config.get('dns_ipv6', False): + asyncdns.IPV6_CONNECTION_SUPPORT = False + + daemon.daemon_exec(config) + logging.info( + "local start with protocol[%s] password [%s] method [%s] obfs [%s] obfs_param [%s]" % + (config['protocol'], + config['password'], + config['method'], + config['obfs'], + config['obfs_param'])) + + try: + logging.info("starting local at %s:%d" % + (config['local_address'], config['local_port'])) + + dns_resolver = asyncdns.DNSResolver() + tcp_server = tcprelay.TCPRelay(config, dns_resolver, True) + udp_server = udprelay.UDPRelay(config, dns_resolver, True) + loop = eventloop.EventLoop() + dns_resolver.add_to_loop(loop) + tcp_server.add_to_loop(loop) + udp_server.add_to_loop(loop) + + def handler(signum, _): + logging.warn('received SIGQUIT, doing graceful shutting down..') + tcp_server.close(next_tick=True) + udp_server.close(next_tick=True) + signal.signal(getattr(signal, 'SIGQUIT', signal.SIGTERM), handler) + + def int_handler(signum, _): + sys.exit(1) + signal.signal(signal.SIGINT, int_handler) + + daemon.set_user(config.get('user', None)) + loop.run() + except Exception as e: + shell.print_exception(e) + sys.exit(1) + +if __name__ == '__main__': + main() diff --git a/shadowsocks/logrun.sh b/shadowsocks/logrun.sh new file mode 100644 index 0000000..2b7a5bd --- /dev/null +++ 
b/shadowsocks/logrun.sh @@ -0,0 +1,6 @@ +#!/bin/bash +cd `dirname $0` +eval $(ps -ef | grep "[0-9] python server\\.py a" | awk '{print "kill "$2}') +ulimit -n 4096 +nohup python server.py a >> ssserver.log 2>&1 & + diff --git a/shadowsocks/lru_cache.py b/shadowsocks/lru_cache.py new file mode 100644 index 0000000..2f48c7f --- /dev/null +++ b/shadowsocks/lru_cache.py @@ -0,0 +1,184 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright 2015 clowwindy +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from __future__ import absolute_import, division, print_function, \ + with_statement + +import collections +import logging +import time + +if __name__ == '__main__': + import os + import sys + import inspect + file_path = os.path.dirname( + os.path.realpath( + inspect.getfile( + inspect.currentframe()))) + sys.path.insert(0, os.path.join(file_path, '../')) + +try: + from collections import OrderedDict + print("loaded collections.OrderedDict") +except: + from shadowsocks.ordereddict import OrderedDict + +# this LRUCache is optimized for concurrency, not QPS +# n: concurrency, keys stored in the cache +# m: visits not timed out, proportional to QPS * timeout +# get & set is O(1), not O(n). 
thus we can support very large n +# sweep is O((n - m)) or O(1024) at most, +# no metter how large the cache or timeout value is + +SWEEP_MAX_ITEMS = 1024 + + +class LRUCache(collections.MutableMapping): + """This class is not thread safe""" + + def __init__(self, timeout=60, close_callback=None, *args, **kwargs): + self.timeout = timeout + self.close_callback = close_callback + self._store = {} + self._keys_to_last_time = OrderedDict() + self.update(dict(*args, **kwargs)) # use the free update to set keys + + def __getitem__(self, key): + # O(1) + t = time.time() + last_t = self._keys_to_last_time[key] + del self._keys_to_last_time[key] + self._keys_to_last_time[key] = t + return self._store[key] + + def __setitem__(self, key, value): + # O(1) + t = time.time() + if key in self._keys_to_last_time: + del self._keys_to_last_time[key] + self._keys_to_last_time[key] = t + self._store[key] = value + + def __delitem__(self, key): + # O(1) + last_t = self._keys_to_last_time[key] + del self._store[key] + del self._keys_to_last_time[key] + + def __iter__(self): + return iter(self._store) + + def __len__(self): + return len(self._store) + + def first(self): + if len(self._keys_to_last_time) > 0: + for key in self._keys_to_last_time: + return key + + def sweep(self, sweep_item_cnt=SWEEP_MAX_ITEMS): + # O(n - m) + now = time.time() + c = 0 + while c < sweep_item_cnt: + if len(self._keys_to_last_time) == 0: + break + for key in self._keys_to_last_time: + break + last_t = self._keys_to_last_time[key] + if now - last_t <= self.timeout: + break + value = self._store[key] + if self.close_callback is not None: + self.close_callback(value) + del self._store[key] + del self._keys_to_last_time[key] + c += 1 + if c: + logging.debug('%d keys swept' % c) + return c < SWEEP_MAX_ITEMS + + def clear(self, keep): + now = time.time() + c = 0 + while len(self._keys_to_last_time) > keep: + if len(self._keys_to_last_time) == 0: + break + for key in self._keys_to_last_time: + break + last_t = 
self._keys_to_last_time[key] + value = self._store[key] + if self.close_callback is not None: + self.close_callback(value) + del self._store[key] + del self._keys_to_last_time[key] + c += 1 + if c: + logging.debug('%d keys swept' % c) + return c < SWEEP_MAX_ITEMS + + +def test(): + c = LRUCache(timeout=0.3) + + c['a'] = 1 + assert c['a'] == 1 + c['a'] = 1 + + time.sleep(0.5) + c.sweep() + assert 'a' not in c + + c['a'] = 2 + c['b'] = 3 + time.sleep(0.2) + c.sweep() + assert c['a'] == 2 + assert c['b'] == 3 + + time.sleep(0.2) + c.sweep() + c['b'] + time.sleep(0.2) + c.sweep() + assert 'a' not in c + assert c['b'] == 3 + + time.sleep(0.5) + c.sweep() + assert 'a' not in c + assert 'b' not in c + + global close_cb_called + close_cb_called = False + + def close_cb(t): + global close_cb_called + assert not close_cb_called + close_cb_called = True + + c = LRUCache(timeout=0.1, close_callback=close_cb) + c['s'] = 1 + c['s'] + time.sleep(0.1) + c['s'] + time.sleep(0.3) + c.sweep() + +if __name__ == '__main__': + test() diff --git a/shadowsocks/manager.py b/shadowsocks/manager.py new file mode 100644 index 0000000..6b25a49 --- /dev/null +++ b/shadowsocks/manager.py @@ -0,0 +1,291 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright 2015 clowwindy +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from __future__ import absolute_import, division, print_function, \ + with_statement + +import errno +import traceback +import socket +import logging +import json +import collections + +from shadowsocks import common, eventloop, tcprelay, udprelay, asyncdns, shell + + +BUF_SIZE = 1506 +STAT_SEND_LIMIT = 50 + + +class Manager(object): + + def __init__(self, config): + self._config = config + self._relays = {} # (tcprelay, udprelay) + self._loop = eventloop.EventLoop() + self._dns_resolver = asyncdns.DNSResolver() + self._dns_resolver.add_to_loop(self._loop) + + self._statistics = collections.defaultdict(int) + self._control_client_addr = None + try: + manager_address = common.to_str(config['manager_address']) + if ':' in manager_address: + addr = manager_address.rsplit(':', 1) + addr = addr[0], int(addr[1]) + addrs = socket.getaddrinfo(addr[0], addr[1]) + if addrs: + family = addrs[0][0] + else: + logging.error('invalid address: %s', manager_address) + exit(1) + else: + addr = manager_address + family = socket.AF_UNIX + self._control_socket = socket.socket(family, + socket.SOCK_DGRAM) + self._control_socket.bind(addr) + self._control_socket.setblocking(False) + except (OSError, IOError) as e: + logging.error(e) + logging.error('can not bind to manager address') + exit(1) + self._loop.add(self._control_socket, + eventloop.POLL_IN, self) + self._loop.add_periodic(self.handle_periodic) + + port_password = config['port_password'] + del config['port_password'] + for port, password in port_password.items(): + a_config = config.copy() + a_config['server_port'] = int(port) + a_config['password'] = password + self.add_port(a_config) + + def add_port(self, config): + port = int(config['server_port']) + servers = self._relays.get(port, None) + if servers: + logging.error("server already exists at %s:%d" % (config['server'], + port)) + return + logging.info("adding server at %s:%d" % (config['server'], port)) + t = tcprelay.TCPRelay(config, self._dns_resolver, False, + 
stat_callback=self.stat_callback) + u = udprelay.UDPRelay(config, self._dns_resolver, False, + stat_callback=self.stat_callback) + t.add_to_loop(self._loop) + u.add_to_loop(self._loop) + self._relays[port] = (t, u) + + def remove_port(self, config): + port = int(config['server_port']) + servers = self._relays.get(port, None) + if servers: + logging.info("removing server at %s:%d" % (config['server'], port)) + t, u = servers + t.close(next_tick=False) + u.close(next_tick=False) + del self._relays[port] + else: + logging.error("server not exist at %s:%d" % (config['server'], + port)) + + def handle_event(self, sock, fd, event): + if sock == self._control_socket and event == eventloop.POLL_IN: + data, self._control_client_addr = sock.recvfrom(BUF_SIZE) + parsed = self._parse_command(data) + if parsed: + command, config = parsed + a_config = self._config.copy() + if config: + # let the command override the configuration file + a_config.update(config) + if 'server_port' not in a_config: + logging.error('can not find server_port in config') + else: + if command == 'add': + self.add_port(a_config) + self._send_control_data(b'ok') + elif command == 'remove': + self.remove_port(a_config) + self._send_control_data(b'ok') + elif command == 'ping': + self._send_control_data(b'pong') + else: + logging.error('unknown command %s', command) + + def _parse_command(self, data): + # commands: + # add: {"server_port": 8000, "password": "foobar"} + # remove: {"server_port": 8000"} + data = common.to_str(data) + parts = data.split(':', 1) + if len(parts) < 2: + return data, None + command, config_json = parts + try: + config = shell.parse_json_in_str(config_json) + return command, config + except Exception as e: + logging.error(e) + return None + + def stat_callback(self, port, data_len): + self._statistics[port] += data_len + + def handle_periodic(self): + r = {} + i = 0 + + def send_data(data_dict): + if data_dict: + # use compact JSON format (without space) + data = 
common.to_bytes(json.dumps(data_dict, + separators=(',', ':'))) + self._send_control_data(b'stat: ' + data) + + for k, v in self._statistics.items(): + r[k] = v + i += 1 + # split the data into segments that fit in UDP packets + if i >= STAT_SEND_LIMIT: + send_data(r) + r.clear() + i = 0 + if len(r) > 0: + send_data(r) + self._statistics.clear() + + def _send_control_data(self, data): + if self._control_client_addr: + try: + self._control_socket.sendto(data, self._control_client_addr) + except (socket.error, OSError, IOError) as e: + error_no = eventloop.errno_from_exception(e) + if error_no in (errno.EAGAIN, errno.EINPROGRESS, + errno.EWOULDBLOCK): + return + else: + shell.print_exception(e) + if self._config['verbose']: + traceback.print_exc() + + def run(self): + self._loop.run() + + +def run(config): + Manager(config).run() + + +def test(): + import time + import threading + import struct + from shadowsocks import encrypt + + logging.basicConfig(level=5, + format='%(asctime)s %(levelname)-8s %(message)s', + datefmt='%Y-%m-%d %H:%M:%S') + enc = [] + eventloop.TIMEOUT_PRECISION = 1 + + def run_server(): + config = shell.get_config(True) + config = config.copy() + a_config = { + 'server': '127.0.0.1', + 'local_port': 1081, + 'port_password': { + '8381': 'foobar1', + '8382': 'foobar2' + }, + 'method': 'aes-256-cfb', + 'manager_address': '127.0.0.1:6001', + 'timeout': 60, + 'fast_open': False, + 'verbose': 2 + } + config.update(a_config) + manager = Manager(config) + enc.append(manager) + manager.run() + + t = threading.Thread(target=run_server) + t.start() + time.sleep(1) + manager = enc[0] + cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + cli.connect(('127.0.0.1', 6001)) + + # test add and remove + time.sleep(1) + cli.send(b'add: {"server_port":7001, "password":"asdfadsfasdf"}') + time.sleep(1) + assert 7001 in manager._relays + data, addr = cli.recvfrom(1506) + assert b'ok' in data + + cli.send(b'remove: {"server_port":8381}') + time.sleep(1) + assert 
8381 not in manager._relays + data, addr = cli.recvfrom(1506) + assert b'ok' in data + logging.info('add and remove test passed') + + # test statistics for TCP + header = common.pack_addr(b'google.com') + struct.pack('>H', 80) + data = encrypt.encrypt_all(b'asdfadsfasdf', 'aes-256-cfb', 1, + header + b'GET /\r\n\r\n') + tcp_cli = socket.socket() + tcp_cli.connect(('127.0.0.1', 7001)) + tcp_cli.send(data) + tcp_cli.recv(4096) + tcp_cli.close() + + data, addr = cli.recvfrom(1506) + data = common.to_str(data) + assert data.startswith('stat: ') + data = data.split('stat:')[1] + stats = shell.parse_json_in_str(data) + assert '7001' in stats + logging.info('TCP statistics test passed') + + # test statistics for UDP + header = common.pack_addr(b'127.0.0.1') + struct.pack('>H', 80) + data = encrypt.encrypt_all(b'foobar2', 'aes-256-cfb', 1, + header + b'test') + udp_cli = socket.socket(type=socket.SOCK_DGRAM) + udp_cli.sendto(data, ('127.0.0.1', 8382)) + tcp_cli.close() + + data, addr = cli.recvfrom(1506) + data = common.to_str(data) + assert data.startswith('stat: ') + data = data.split('stat:')[1] + stats = json.loads(data) + assert '8382' in stats + logging.info('UDP statistics test passed') + + manager._loop.stop() + t.join() + + +if __name__ == '__main__': + test() diff --git a/shadowsocks/obfs.py b/shadowsocks/obfs.py new file mode 100644 index 0000000..dab0338 --- /dev/null +++ b/shadowsocks/obfs.py @@ -0,0 +1,119 @@ +#!/usr/bin/env python +# +# Copyright 2015-2015 breakwa11 +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from __future__ import absolute_import, division, print_function, \ + with_statement + +import os +import sys +import hashlib +import logging + +from shadowsocks import common +from shadowsocks.obfsplugin import plain, http_simple, obfs_tls, verify, auth, auth_chain, simple_obfs_http, simple_obfs_tls + + +method_supported = {} +method_supported.update(plain.obfs_map) +method_supported.update(http_simple.obfs_map) +method_supported.update(obfs_tls.obfs_map) +method_supported.update(verify.obfs_map) +method_supported.update(auth.obfs_map) +method_supported.update(auth_chain.obfs_map) +method_supported.update(simple_obfs_http.obfs_map) +method_supported.update(simple_obfs_tls.obfs_map) + + +class server_info(object): + + def __init__(self, data): + self.data = data + + +class obfs(object): + + def __init__(self, method): + method = common.to_str(method) + self.method = method + self._method_info = self.get_method_info(method) + if self._method_info: + self.obfs = self.get_obfs(method) + else: + raise Exception('obfs plugin [%s] not supported' % method) + + def init_data(self): + return self.obfs.init_data() + + def set_server_info(self, server_info): + return self.obfs.set_server_info(server_info) + + def get_server_info(self): + return self.obfs.get_server_info() + + def get_method_info(self, method): + method = method.lower() + m = method_supported.get(method) + return m + + def get_obfs(self, method): + m = self._method_info + return m[0](method) + + def get_overhead(self, direction): + return self.obfs.get_overhead(direction) + + def client_pre_encrypt(self, buf): + return self.obfs.client_pre_encrypt(buf) + + def client_encode(self, buf): + return self.obfs.client_encode(buf) + + def client_decode(self, buf): + return self.obfs.client_decode(buf) + + def client_post_decrypt(self, buf): + return self.obfs.client_post_decrypt(buf) + + def 
server_pre_encrypt(self, buf): + return self.obfs.server_pre_encrypt(buf) + + def server_encode(self, buf): + return self.obfs.server_encode(buf) + + def server_decode(self, buf): + return self.obfs.server_decode(buf) + + def server_post_decrypt(self, buf): + return self.obfs.server_post_decrypt(buf) + + def client_udp_pre_encrypt(self, buf): + return self.obfs.client_udp_pre_encrypt(buf) + + def client_udp_post_decrypt(self, buf): + return self.obfs.client_udp_post_decrypt(buf) + + def server_udp_pre_encrypt(self, buf, uid): + return self.obfs.server_udp_pre_encrypt(buf, uid) + + def server_udp_post_decrypt(self, buf): + return self.obfs.server_udp_post_decrypt(buf) + + def dispose(self): + self.obfs.dispose() + del self.obfs + + def get_hostname(self): + return self.obfs.host_name diff --git a/shadowsocks/obfsplugin/__init__.py b/shadowsocks/obfsplugin/__init__.py new file mode 100644 index 0000000..401c7b7 --- /dev/null +++ b/shadowsocks/obfsplugin/__init__.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python +# +# Copyright 2015 clowwindy +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from __future__ import absolute_import, division, print_function, \ + with_statement diff --git a/shadowsocks/obfsplugin/auth.py b/shadowsocks/obfsplugin/auth.py new file mode 100644 index 0000000..0f26365 --- /dev/null +++ b/shadowsocks/obfsplugin/auth.py @@ -0,0 +1,1234 @@ +#!/usr/bin/env python +# +# Copyright 2015-2015 breakwa11 +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from __future__ import absolute_import, division, print_function, \ + with_statement + +import os +import sys +import hashlib +import logging +import binascii +import base64 +import time +import datetime +import random +import math +import struct +import zlib +import hmac +import hashlib + +import shadowsocks +from shadowsocks import common, lru_cache, encrypt +from shadowsocks.obfsplugin import plain +from shadowsocks.common import to_bytes, to_str, ord, chr + +def create_auth_sha1(method): + return auth_sha1(method) + +def create_auth_sha1_v2(method): + return auth_sha1_v2(method) + +def create_auth_sha1_v4(method): + return auth_sha1_v4(method) + +def create_auth_aes128_md5(method): + return auth_aes128_sha1(method, hashlib.md5) + +def create_auth_aes128_sha1(method): + return auth_aes128_sha1(method, hashlib.sha1) + +obfs_map = { + 'auth_sha1': (create_auth_sha1,), + 'auth_sha1_compatible': (create_auth_sha1,), + 'auth_sha1_v2': (create_auth_sha1_v2,), + 'auth_sha1_v2_compatible': (create_auth_sha1_v2,), + 'auth_sha1_v4': (create_auth_sha1_v4,), + 'auth_sha1_v4_compatible': 
(create_auth_sha1_v4,), + 'auth_aes128_md5': (create_auth_aes128_md5,), + 'auth_aes128_sha1': (create_auth_aes128_sha1,), +} + +def match_begin(str1, str2): + if len(str1) >= len(str2): + if str1[:len(str2)] == str2: + return True + return False + +class obfs_verify_data(object): + def __init__(self): + pass + +class auth_base(plain.plain): + def __init__(self, method): + super(auth_base, self).__init__(method) + self.method = method + self.no_compatible_method = '' + self.overhead = 7 + + def init_data(self): + return '' + + def get_overhead(self, direction): # direction: true for c->s false for s->c + return self.overhead + + def set_server_info(self, server_info): + self.server_info = server_info + + def client_encode(self, buf): + return buf + + def client_decode(self, buf): + return (buf, False) + + def server_encode(self, buf): + return buf + + def server_decode(self, buf): + return (buf, True, False) + + def not_match_return(self, buf): + self.raw_trans = True + self.overhead = 0 + if self.method == self.no_compatible_method: + return (b'E'*2048, False) + return (buf, False) + +class client_queue(object): + def __init__(self, begin_id): + self.front = begin_id - 64 + self.back = begin_id + 1 + self.alloc = {} + self.enable = True + self.last_update = time.time() + + def update(self): + self.last_update = time.time() + + def is_active(self): + return time.time() - self.last_update < 60 * 3 + + def re_enable(self, connection_id): + self.enable = True + self.front = connection_id - 64 + self.back = connection_id + 1 + self.alloc = {} + + def insert(self, connection_id): + if not self.enable: + logging.warn('obfs auth: not enable') + return False + if not self.is_active(): + self.re_enable(connection_id) + self.update() + if connection_id < self.front: + logging.warn('obfs auth: deprecated id, someone replay attack') + return False + if connection_id > self.front + 0x4000: + logging.warn('obfs auth: wrong id') + return False + if connection_id in self.alloc: + 
logging.warn('obfs auth: duplicate id, someone replay attack') + return False + if self.back <= connection_id: + self.back = connection_id + 1 + self.alloc[connection_id] = 1 + while (self.front in self.alloc) or self.front + 0x1000 < self.back: + if self.front in self.alloc: + del self.alloc[self.front] + self.front += 1 + return True + +class obfs_auth_data(object): + def __init__(self): + self.client_id = {} + self.startup_time = int(time.time() - 30) & 0xFFFFFFFF + self.local_client_id = b'' + self.connection_id = 0 + self.set_max_client(64) # max active client count + + def update(self, client_id, connection_id): + if client_id in self.client_id: + self.client_id[client_id].update() + + def set_max_client(self, max_client): + self.max_client = max_client + self.max_buffer = max(self.max_client * 2, 256) + + def insert(self, client_id, connection_id): + if client_id not in self.client_id or not self.client_id[client_id].enable: + active = 0 + for c_id in self.client_id: + if self.client_id[c_id].is_active(): + active += 1 + if active >= self.max_client: + logging.warn('obfs auth: max active clients exceeded') + return False + + if len(self.client_id) < self.max_client: + if client_id not in self.client_id: + self.client_id[client_id] = client_queue(connection_id) + else: + self.client_id[client_id].re_enable(connection_id) + return self.client_id[client_id].insert(connection_id) + keys = self.client_id.keys() + random.shuffle(keys) + for c_id in keys: + if not self.client_id[c_id].is_active() and self.client_id[c_id].enable: + if len(self.client_id) >= self.max_buffer: + del self.client_id[c_id] + else: + self.client_id[c_id].enable = False + if client_id not in self.client_id: + self.client_id[client_id] = client_queue(connection_id) + else: + self.client_id[client_id].re_enable(connection_id) + return self.client_id[client_id].insert(connection_id) + logging.warn('obfs auth: no inactive client [assert]') + return False + else: + return 
self.client_id[client_id].insert(connection_id) + +class auth_sha1(auth_base): + def __init__(self, method): + super(auth_sha1, self).__init__(method) + self.recv_buf = b'' + self.unit_len = 8000 + self.decrypt_packet_num = 0 + self.raw_trans = False + self.has_sent_header = False + self.has_recv_header = False + self.client_id = 0 + self.connection_id = 0 + self.max_time_dif = 60 * 60 # time dif (second) setting + self.no_compatible_method = 'auth_sha1' + + def init_data(self): + return obfs_auth_data() + + def set_server_info(self, server_info): + self.server_info = server_info + try: + max_client = int(server_info.protocol_param) + except: + max_client = 64 + self.server_info.data.set_max_client(max_client) + + def pack_data(self, buf): + rnd_data = os.urandom(common.ord(os.urandom(1)[0]) % 16) + data = common.chr(len(rnd_data) + 1) + rnd_data + buf + data = struct.pack('>H', len(data) + 6) + data + adler32 = zlib.adler32(data) & 0xFFFFFFFF + data += struct.pack('H', len(data) + 16) + data + crc = binascii.crc32(self.server_info.key) & 0xFFFFFFFF + data = struct.pack(' 0xFF000000: + self.server_info.data.local_client_id = b'' + if not self.server_info.data.local_client_id: + self.server_info.data.local_client_id = os.urandom(4) + logging.debug("local_client_id %s" % (binascii.hexlify(self.server_info.data.local_client_id),)) + self.server_info.data.connection_id = struct.unpack(' self.unit_len: + ret += self.pack_data(buf[:self.unit_len]) + buf = buf[self.unit_len:] + ret += self.pack_data(buf) + return ret + + def client_post_decrypt(self, buf): + if self.raw_trans: + return buf + self.recv_buf += buf + out_buf = b'' + while len(self.recv_buf) > 2: + length = struct.unpack('>H', self.recv_buf[:2])[0] + if length >= 8192 or length < 7: + self.raw_trans = True + self.recv_buf = b'' + raise Exception('client_post_decrypt data error') + if length > len(self.recv_buf): + break + + if struct.pack(' self.unit_len: + ret += self.pack_data(buf[:self.unit_len]) + buf = 
buf[self.unit_len:] + ret += self.pack_data(buf) + return ret + + def server_post_decrypt(self, buf): + if self.raw_trans: + return (buf, False) + self.recv_buf += buf + out_buf = b'' + if not self.has_recv_header: + if len(self.recv_buf) < 6: + return (b'', False) + crc = struct.pack('H', self.recv_buf[4:6])[0] + if length > 2048: + return self.not_match_return(self.recv_buf) + if length > len(self.recv_buf): + return (b'', False) + sha1data = hmac.new(self.server_info.recv_iv + self.server_info.key, self.recv_buf[:length - 10], hashlib.sha1).digest()[:10] + if sha1data != self.recv_buf[length - 10:length]: + logging.error('auth_sha1 data uncorrect auth HMAC-SHA1') + return self.not_match_return(self.recv_buf) + pos = common.ord(self.recv_buf[6]) + 6 + out_buf = self.recv_buf[pos:length - 10] + if len(out_buf) < 12: + logging.info('auth_sha1: too short, data %s' % (binascii.hexlify(self.recv_buf),)) + return self.not_match_return(self.recv_buf) + utc_time = struct.unpack(' self.max_time_dif \ + or common.int32(utc_time - self.server_info.data.startup_time) < -self.max_time_dif / 2: + logging.info('auth_sha1: wrong timestamp, time_dif %d, data %s' % (time_dif, binascii.hexlify(out_buf),)) + return self.not_match_return(self.recv_buf) + elif self.server_info.data.insert(client_id, connection_id): + self.has_recv_header = True + out_buf = out_buf[12:] + self.client_id = client_id + self.connection_id = connection_id + else: + logging.info('auth_sha1: auth fail, data %s' % (binascii.hexlify(out_buf),)) + return self.not_match_return(self.recv_buf) + self.recv_buf = self.recv_buf[length:] + self.has_recv_header = True + + while len(self.recv_buf) > 2: + length = struct.unpack('>H', self.recv_buf[:2])[0] + if length >= 8192 or length < 7: + self.raw_trans = True + self.recv_buf = b'' + if self.decrypt_packet_num == 0: + logging.info('auth_sha1: over size') + return (b'E'*2048, False) + else: + raise Exception('server_post_decrype data error') + if length > 
len(self.recv_buf): + break + + if struct.pack(' 1300: + return b'\x01' + + if buf_size > 400: + rnd_data = os.urandom(common.ord(os.urandom(1)[0]) % 128) + return common.chr(len(rnd_data) + 1) + rnd_data + + rnd_data = os.urandom(struct.unpack('>H', os.urandom(2))[0] % 1024) + return common.chr(255) + struct.pack('>H', len(rnd_data) + 3) + rnd_data + + def pack_data(self, buf): + data = self.rnd_data(len(buf)) + buf + data = struct.pack('>H', len(data) + 6) + data + adler32 = zlib.adler32(data) & 0xFFFFFFFF + data += struct.pack('H', len(data) + 16) + data + crc = binascii.crc32(self.salt + self.server_info.key) & 0xFFFFFFFF + data = struct.pack(' 0xFF000000: + self.server_info.data.local_client_id = b'' + if not self.server_info.data.local_client_id: + self.server_info.data.local_client_id = os.urandom(8) + logging.debug("local_client_id %s" % (binascii.hexlify(self.server_info.data.local_client_id),)) + self.server_info.data.connection_id = struct.unpack(' self.unit_len: + ret += self.pack_data(buf[:self.unit_len]) + buf = buf[self.unit_len:] + ret += self.pack_data(buf) + return ret + + def client_post_decrypt(self, buf): + if self.raw_trans: + return buf + self.recv_buf += buf + out_buf = b'' + while len(self.recv_buf) > 2: + length = struct.unpack('>H', self.recv_buf[:2])[0] + if length >= 8192 or length < 7: + self.raw_trans = True + self.recv_buf = b'' + raise Exception('client_post_decrypt data error') + if length > len(self.recv_buf): + break + + if struct.pack('H', self.recv_buf[3:5])[0] + 2 + out_buf += self.recv_buf[pos:length - 4] + self.recv_buf = self.recv_buf[length:] + + if out_buf: + self.decrypt_packet_num += 1 + return out_buf + + def server_pre_encrypt(self, buf): + if self.raw_trans: + return buf + ret = b'' + while len(buf) > self.unit_len: + ret += self.pack_data(buf[:self.unit_len]) + buf = buf[self.unit_len:] + ret += self.pack_data(buf) + return ret + + def server_post_decrypt(self, buf): + if self.raw_trans: + return (buf, False) + 
self.recv_buf += buf + out_buf = b'' + sendback = False + + if not self.has_recv_header: + if len(self.recv_buf) < 6: + return (b'', False) + crc = struct.pack('H', self.recv_buf[4:6])[0] + if length > 2048: + return self.not_match_return(self.recv_buf) + if length > len(self.recv_buf): + return (b'', False) + sha1data = hmac.new(self.server_info.recv_iv + self.server_info.key, self.recv_buf[:length - 10], hashlib.sha1).digest()[:10] + if sha1data != self.recv_buf[length - 10:length]: + logging.error('auth_sha1_v2 data uncorrect auth HMAC-SHA1') + return self.not_match_return(self.recv_buf) + pos = common.ord(self.recv_buf[6]) + if pos < 255: + pos += 6 + else: + pos = struct.unpack('>H', self.recv_buf[7:9])[0] + 6 + out_buf = self.recv_buf[pos:length - 10] + if len(out_buf) < 12: + logging.info('auth_sha1_v2: too short, data %s' % (binascii.hexlify(self.recv_buf),)) + return self.not_match_return(self.recv_buf) + client_id = struct.unpack(' 2: + length = struct.unpack('>H', self.recv_buf[:2])[0] + if length >= 8192 or length < 7: + self.raw_trans = True + self.recv_buf = b'' + if self.decrypt_packet_num == 0: + logging.info('auth_sha1_v2: over size') + return (b'E'*2048, False) + else: + raise Exception('server_post_decrype data error') + if length > len(self.recv_buf): + break + + if struct.pack('H', self.recv_buf[3:5])[0] + 2 + out_buf += self.recv_buf[pos:length - 4] + self.recv_buf = self.recv_buf[length:] + if pos == length - 4: + sendback = True + + if out_buf: + self.server_info.data.update(self.client_id, self.connection_id) + self.decrypt_packet_num += 1 + return (out_buf, sendback) + +class auth_sha1_v4(auth_base): + def __init__(self, method): + super(auth_sha1_v4, self).__init__(method) + self.recv_buf = b'' + self.unit_len = 8100 + self.decrypt_packet_num = 0 + self.raw_trans = False + self.has_sent_header = False + self.has_recv_header = False + self.client_id = 0 + self.connection_id = 0 + self.max_time_dif = 60 * 60 * 24 # time dif (second) setting 
+ self.salt = b"auth_sha1_v4" + self.no_compatible_method = 'auth_sha1_v4' + + def init_data(self): + return obfs_auth_v2_data() + + def set_server_info(self, server_info): + self.server_info = server_info + try: + max_client = int(server_info.protocol_param) + except: + max_client = 64 + self.server_info.data.set_max_client(max_client) + + def rnd_data(self, buf_size): + if buf_size > 1200: + return b'\x01' + + if buf_size > 400: + rnd_data = os.urandom(common.ord(os.urandom(1)[0]) % 256) + else: + rnd_data = os.urandom(struct.unpack('>H', os.urandom(2))[0] % 512) + + if len(rnd_data) < 128: + return common.chr(len(rnd_data) + 1) + rnd_data + else: + return common.chr(255) + struct.pack('>H', len(rnd_data) + 3) + rnd_data + + def pack_data(self, buf): + data = self.rnd_data(len(buf)) + buf + data_len = len(data) + 8 + crc = binascii.crc32(struct.pack('>H', data_len)) & 0xFFFF + data = struct.pack('H', data_len) + data + adler32 = zlib.adler32(data) & 0xFFFFFFFF + data += struct.pack('H', data_len) + self.salt + self.server_info.key) & 0xFFFFFFFF + data = struct.pack('H', data_len) + data + data += hmac.new(self.server_info.iv + self.server_info.key, data, hashlib.sha1).digest()[:10] + return data + + def auth_data(self): + utc_time = int(time.time()) & 0xFFFFFFFF + if self.server_info.data.connection_id > 0xFF000000: + self.server_info.data.local_client_id = b'' + if not self.server_info.data.local_client_id: + self.server_info.data.local_client_id = os.urandom(4) + logging.debug("local_client_id %s" % (binascii.hexlify(self.server_info.data.local_client_id),)) + self.server_info.data.connection_id = struct.unpack(' self.unit_len: + ret += self.pack_data(buf[:self.unit_len]) + buf = buf[self.unit_len:] + ret += self.pack_data(buf) + return ret + + def client_post_decrypt(self, buf): + if self.raw_trans: + return buf + self.recv_buf += buf + out_buf = b'' + while len(self.recv_buf) > 4: + crc = struct.pack('H', self.recv_buf[:2])[0] + if length >= 8192 or length < 
7: + self.raw_trans = True + self.recv_buf = b'' + raise Exception('client_post_decrypt data error') + if length > len(self.recv_buf): + break + + if struct.pack('H', self.recv_buf[5:7])[0] + 4 + out_buf += self.recv_buf[pos:length - 4] + self.recv_buf = self.recv_buf[length:] + + if out_buf: + self.decrypt_packet_num += 1 + return out_buf + + def server_pre_encrypt(self, buf): + if self.raw_trans: + return buf + ret = b'' + while len(buf) > self.unit_len: + ret += self.pack_data(buf[:self.unit_len]) + buf = buf[self.unit_len:] + ret += self.pack_data(buf) + return ret + + def server_post_decrypt(self, buf): + if self.raw_trans: + return (buf, False) + self.recv_buf += buf + out_buf = b'' + sendback = False + + if not self.has_recv_header: + if len(self.recv_buf) <= 6: + return (b'', False) + crc = struct.pack('H', self.recv_buf[:2])[0] + if length > len(self.recv_buf): + return (b'', False) + sha1data = hmac.new(self.server_info.recv_iv + self.server_info.key, self.recv_buf[:length - 10], hashlib.sha1).digest()[:10] + if sha1data != self.recv_buf[length - 10:length]: + logging.error('auth_sha1_v4 data uncorrect auth HMAC-SHA1') + return self.not_match_return(self.recv_buf) + pos = common.ord(self.recv_buf[6]) + if pos < 255: + pos += 6 + else: + pos = struct.unpack('>H', self.recv_buf[7:9])[0] + 6 + out_buf = self.recv_buf[pos:length - 10] + if len(out_buf) < 12: + logging.info('auth_sha1_v4: too short, data %s' % (binascii.hexlify(self.recv_buf),)) + return self.not_match_return(self.recv_buf) + utc_time = struct.unpack(' self.max_time_dif: + logging.info('auth_sha1_v4: wrong timestamp, time_dif %d, data %s' % (time_dif, binascii.hexlify(out_buf),)) + return self.not_match_return(self.recv_buf) + elif self.server_info.data.insert(client_id, connection_id): + self.has_recv_header = True + out_buf = out_buf[12:] + self.client_id = client_id + self.connection_id = connection_id + else: + logging.info('auth_sha1_v4: auth fail, data %s' % (binascii.hexlify(out_buf),)) 
+ return self.not_match_return(self.recv_buf) + self.recv_buf = self.recv_buf[length:] + self.has_recv_header = True + sendback = True + + while len(self.recv_buf) > 4: + crc = struct.pack('H', self.recv_buf[:2])[0] + if length >= 8192 or length < 7: + self.raw_trans = True + self.recv_buf = b'' + if self.decrypt_packet_num == 0: + logging.info('auth_sha1_v4: over size') + return (b'E'*2048, False) + else: + raise Exception('server_post_decrype data error') + if length > len(self.recv_buf): + break + + if struct.pack('H', self.recv_buf[5:7])[0] + 4 + out_buf += self.recv_buf[pos:length - 4] + self.recv_buf = self.recv_buf[length:] + if pos == length - 4: + sendback = True + + if out_buf: + self.server_info.data.update(self.client_id, self.connection_id) + self.decrypt_packet_num += 1 + return (out_buf, sendback) + +class obfs_auth_mu_data(object): + def __init__(self): + self.user_id = {} + self.local_client_id = b'' + self.connection_id = 0 + self.set_max_client(64) # max active client count + + def update(self, user_id, client_id, connection_id): + if user_id not in self.user_id: + self.user_id[user_id] = lru_cache.LRUCache() + local_client_id = self.user_id[user_id] + + if client_id in local_client_id: + local_client_id[client_id].update() + + def set_max_client(self, max_client): + self.max_client = max_client + self.max_buffer = max(self.max_client * 2, 1024) + + def insert(self, user_id, client_id, connection_id): + if user_id not in self.user_id: + self.user_id[user_id] = lru_cache.LRUCache() + local_client_id = self.user_id[user_id] + + if local_client_id.get(client_id, None) is None or not local_client_id[client_id].enable: + if local_client_id.first() is None or len(local_client_id) < self.max_client: + if client_id not in local_client_id: + #TODO: check + local_client_id[client_id] = client_queue(connection_id) + else: + local_client_id[client_id].re_enable(connection_id) + return local_client_id[client_id].insert(connection_id) + + if not 
local_client_id[local_client_id.first()].is_active(): + del local_client_id[local_client_id.first()] + if client_id not in local_client_id: + #TODO: check + local_client_id[client_id] = client_queue(connection_id) + else: + local_client_id[client_id].re_enable(connection_id) + return local_client_id[client_id].insert(connection_id) + + logging.warn('auth_aes128: no inactive client') + return False + else: + return local_client_id[client_id].insert(connection_id) + +class auth_aes128_sha1(auth_base): + def __init__(self, method, hashfunc): + super(auth_aes128_sha1, self).__init__(method) + self.hashfunc = hashfunc + self.recv_buf = b'' + self.unit_len = 8100 + self.raw_trans = False + self.has_sent_header = False + self.has_recv_header = False + self.client_id = 0 + self.connection_id = 0 + self.max_time_dif = 60 * 60 * 24 # time dif (second) setting + self.salt = hashfunc == hashlib.md5 and b"auth_aes128_md5" or b"auth_aes128_sha1" + self.no_compatible_method = hashfunc == hashlib.md5 and "auth_aes128_md5" or 'auth_aes128_sha1' + self.extra_wait_size = struct.unpack('>H', os.urandom(2))[0] % 1024 + self.pack_id = 1 + self.recv_id = 1 + self.user_id = None + self.user_key = None + self.last_rnd_len = 0 + self.overhead = 9 + + def init_data(self): + return obfs_auth_mu_data() + + def get_overhead(self, direction): # direction: true for c->s false for s->c + return self.overhead + + def set_server_info(self, server_info): + self.server_info = server_info + try: + max_client = int(server_info.protocol_param.split('#')[0]) + except: + max_client = 64 + self.server_info.data.set_max_client(max_client) + + def trapezoid_random_float(self, d): + if d == 0: + return random.random() + s = random.random() + a = 1 - d + return (math.sqrt(a * a + 4 * d * s) - a) / (2 * d) + + def trapezoid_random_int(self, max_val, d): + v = self.trapezoid_random_float(d) + return int(v * max_val) + + def rnd_data_len(self, buf_size, full_buf_size): + if full_buf_size >= 
self.server_info.buffer_size: + return 0 + tcp_mss = self.server_info.tcp_mss + rev_len = tcp_mss - buf_size - 9 + if rev_len == 0: + return 0 + if rev_len < 0: + if rev_len > -tcp_mss: + return self.trapezoid_random_int(rev_len + tcp_mss, -0.3) + return common.ord(os.urandom(1)[0]) % 32 + if buf_size > 900: + return struct.unpack('>H', os.urandom(2))[0] % rev_len + return self.trapezoid_random_int(rev_len, -0.3) + + def rnd_data(self, buf_size, full_buf_size): + data_len = self.rnd_data_len(buf_size, full_buf_size) + + if data_len < 128: + return common.chr(data_len + 1) + os.urandom(data_len) + + return common.chr(255) + struct.pack(' 400: + rnd_len = struct.unpack(' 0xFF000000: + self.server_info.data.local_client_id = b'' + if not self.server_info.data.local_client_id: + self.server_info.data.local_client_id = os.urandom(4) + logging.debug("local_client_id %s" % (binascii.hexlify(self.server_info.data.local_client_id),)) + self.server_info.data.connection_id = struct.unpack(' self.unit_len: + ret += self.pack_data(buf[:self.unit_len], ogn_data_len) + buf = buf[self.unit_len:] + ret += self.pack_data(buf, ogn_data_len) + self.last_rnd_len = ogn_data_len + return ret + + def client_post_decrypt(self, buf): + if self.raw_trans: + return buf + self.recv_buf += buf + out_buf = b'' + while len(self.recv_buf) > 4: + mac_key = self.user_key + struct.pack('= 8192 or length < 7: + self.raw_trans = True + self.recv_buf = b'' + raise Exception('client_post_decrypt data error') + if length > len(self.recv_buf): + break + + if hmac.new(mac_key, self.recv_buf[:length - 4], self.hashfunc).digest()[:4] != self.recv_buf[length - 4:length]: + self.raw_trans = True + self.recv_buf = b'' + raise Exception('client_post_decrypt data uncorrect checksum') + + self.recv_id = (self.recv_id + 1) & 0xFFFFFFFF + pos = common.ord(self.recv_buf[4]) + if pos < 255: + pos += 4 + else: + pos = struct.unpack(' self.unit_len: + ret += self.pack_data(buf[:self.unit_len], ogn_data_len) + buf = 
buf[self.unit_len:] + ret += self.pack_data(buf, ogn_data_len) + self.last_rnd_len = ogn_data_len + return ret + + def server_post_decrypt(self, buf): + if self.raw_trans: + return (buf, False) + self.recv_buf += buf + out_buf = b'' + sendback = False + + if not self.has_recv_header: + if len(self.recv_buf) >= 7 or len(self.recv_buf) in [2, 3]: + recv_len = min(len(self.recv_buf), 7) + mac_key = self.server_info.recv_iv + self.server_info.key + sha1data = hmac.new(mac_key, self.recv_buf[:1], self.hashfunc).digest()[:recv_len - 1] + if sha1data != self.recv_buf[1:recv_len]: + return self.not_match_return(self.recv_buf) + + if len(self.recv_buf) < 31: + return (b'', False) + sha1data = hmac.new(mac_key, self.recv_buf[7:27], self.hashfunc).digest()[:4] + if sha1data != self.recv_buf[27:31]: + logging.error('%s data uncorrect auth HMAC-SHA1 from %s:%d, data %s' % (self.no_compatible_method, self.server_info.client, self.server_info.client_port, binascii.hexlify(self.recv_buf))) + if len(self.recv_buf) < 31 + self.extra_wait_size: + return (b'', False) + return self.not_match_return(self.recv_buf) + + uid = struct.unpack(' self.max_time_dif: + logging.info('%s: wrong timestamp, time_dif %d, data %s' % (self.no_compatible_method, time_dif, binascii.hexlify(head))) + return self.not_match_return(self.recv_buf) + elif self.server_info.data.insert(self.user_id, client_id, connection_id): + self.has_recv_header = True + out_buf = self.recv_buf[31 + rnd_len:length - 4] + self.client_id = client_id + self.connection_id = connection_id + else: + logging.info('%s: auth fail, data %s' % (self.no_compatible_method, binascii.hexlify(out_buf))) + return self.not_match_return(self.recv_buf) + self.recv_buf = self.recv_buf[length:] + self.has_recv_header = True + sendback = True + + while len(self.recv_buf) > 4: + mac_key = self.user_key + struct.pack('= 8192 or length < 7: + self.raw_trans = True + self.recv_buf = b'' + if self.recv_id == 0: + logging.info(self.no_compatible_method + 
': over size') + return (b'E'*2048, False) + else: + raise Exception('server_post_decrype data error') + if length > len(self.recv_buf): + break + + if hmac.new(mac_key, self.recv_buf[:length - 4], self.hashfunc).digest()[:4] != self.recv_buf[length - 4:length]: + logging.info('%s: checksum error, data %s' % (self.no_compatible_method, binascii.hexlify(self.recv_buf[:length]))) + self.raw_trans = True + self.recv_buf = b'' + if self.recv_id == 0: + return (b'E'*2048, False) + else: + raise Exception('server_post_decrype data uncorrect checksum') + + self.recv_id = (self.recv_id + 1) & 0xFFFFFFFF + pos = common.ord(self.recv_buf[4]) + if pos < 255: + pos += 4 + else: + pos = struct.unpack('> 17) ^ (y >> 26)) & xorshift128plus.max_int + self.v1 = x + return (x + y) & xorshift128plus.max_int + + def init_from_bin(self, bin): + bin += b'\0' * 16 + self.v0 = struct.unpack('= len(str2): + if str1[:len(str2)] == str2: + return True + return False + +class auth_base(plain.plain): + def __init__(self, method): + super(auth_base, self).__init__(method) + self.method = method + self.no_compatible_method = '' + self.overhead = 4 + + def init_data(self): + return '' + + def get_overhead(self, direction): # direction: true for c->s false for s->c + return self.overhead + + def set_server_info(self, server_info): + self.server_info = server_info + + def client_encode(self, buf): + return buf + + def client_decode(self, buf): + return (buf, False) + + def server_encode(self, buf): + return buf + + def server_decode(self, buf): + return (buf, True, False) + + def not_match_return(self, buf): + self.raw_trans = True + self.overhead = 0 + if self.method == self.no_compatible_method: + return (b'E'*2048, False) + return (buf, False) + +class client_queue(object): + def __init__(self, begin_id): + self.front = begin_id - 64 + self.back = begin_id + 1 + self.alloc = {} + self.enable = True + self.last_update = time.time() + self.ref = 0 + + def update(self): + self.last_update = 
time.time() + + def addref(self): + self.ref += 1 + + def delref(self): + if self.ref > 0: + self.ref -= 1 + + def is_active(self): + return (self.ref > 0) and (time.time() - self.last_update < 60 * 10) + + def re_enable(self, connection_id): + self.enable = True + self.front = connection_id - 64 + self.back = connection_id + 1 + self.alloc = {} + + def insert(self, connection_id): + if not self.enable: + logging.warn('obfs auth: not enable') + return False + if not self.is_active(): + self.re_enable(connection_id) + self.update() + if connection_id < self.front: + logging.warn('obfs auth: deprecated id, someone replay attack') + return False + if connection_id > self.front + 0x4000: + logging.warn('obfs auth: wrong id') + return False + if connection_id in self.alloc: + logging.warn('obfs auth: duplicate id, someone replay attack') + return False + if self.back <= connection_id: + self.back = connection_id + 1 + self.alloc[connection_id] = 1 + while (self.front in self.alloc) or self.front + 0x1000 < self.back: + if self.front in self.alloc: + del self.alloc[self.front] + self.front += 1 + self.addref() + return True + +class obfs_auth_chain_data(object): + def __init__(self, name): + self.name = name + self.user_id = {} + self.local_client_id = b'' + self.connection_id = 0 + self.set_max_client(64) # max active client count + + def update(self, user_id, client_id, connection_id): + if user_id not in self.user_id: + self.user_id[user_id] = lru_cache.LRUCache() + local_client_id = self.user_id[user_id] + + if client_id in local_client_id: + local_client_id[client_id].update() + + def set_max_client(self, max_client): + self.max_client = max_client + self.max_buffer = max(self.max_client * 2, 1024) + + def insert(self, user_id, client_id, connection_id): + if user_id not in self.user_id: + self.user_id[user_id] = lru_cache.LRUCache() + local_client_id = self.user_id[user_id] + + if local_client_id.get(client_id, None) is None or not 
local_client_id[client_id].enable: + if local_client_id.first() is None or len(local_client_id) < self.max_client: + if client_id not in local_client_id: + #TODO: check + local_client_id[client_id] = client_queue(connection_id) + else: + local_client_id[client_id].re_enable(connection_id) + return local_client_id[client_id].insert(connection_id) + + if not local_client_id[local_client_id.first()].is_active(): + del local_client_id[local_client_id.first()] + if client_id not in local_client_id: + #TODO: check + local_client_id[client_id] = client_queue(connection_id) + else: + local_client_id[client_id].re_enable(connection_id) + return local_client_id[client_id].insert(connection_id) + + logging.warn(self.name + ': no inactive client') + return False + else: + return local_client_id[client_id].insert(connection_id) + + def remove(self, user_id, client_id): + if user_id in self.user_id: + local_client_id = self.user_id[user_id] + if client_id in local_client_id: + local_client_id[client_id].delref() + +class auth_chain_a(auth_base): + def __init__(self, method): + super(auth_chain_a, self).__init__(method) + self.hashfunc = hashlib.md5 + self.recv_buf = b'' + self.unit_len = 2800 + self.raw_trans = False + self.has_sent_header = False + self.has_recv_header = False + self.client_id = 0 + self.connection_id = 0 + self.max_time_dif = 60 * 60 * 24 # time dif (second) setting + self.salt = b"auth_chain_a" + self.no_compatible_method = 'auth_chain_a' + self.pack_id = 1 + self.recv_id = 1 + self.user_id = None + self.user_id_num = 0 + self.user_key = None + self.overhead = 4 + self.client_over_head = 4 + self.last_client_hash = b'' + self.last_server_hash = b'' + self.random_client = xorshift128plus() + self.random_server = xorshift128plus() + self.encryptor = None + + def init_data(self): + return obfs_auth_chain_data(self.method) + + def get_overhead(self, direction): # direction: true for c->s false for s->c + return self.overhead + + def set_server_info(self, 
server_info): + self.server_info = server_info + try: + max_client = int(server_info.protocol_param.split('#')[0]) + except: + max_client = 64 + self.server_info.data.set_max_client(max_client) + + def trapezoid_random_float(self, d): + if d == 0: + return random.random() + s = random.random() + a = 1 - d + return (math.sqrt(a * a + 4 * d * s) - a) / (2 * d) + + def trapezoid_random_int(self, max_val, d): + v = self.trapezoid_random_float(d) + return int(v * max_val) + + def rnd_data_len(self, buf_size, last_hash, random): + if buf_size > 1440: + return 0 + random.init_from_bin_len(last_hash, buf_size) + if buf_size > 1300: + return random.next() % 31 + if buf_size > 900: + return random.next() % 127 + if buf_size > 400: + return random.next() % 521 + return random.next() % 1021 + + def udp_rnd_data_len(self, last_hash, random): + random.init_from_bin(last_hash) + return random.next() % 127 + + def rnd_start_pos(self, rand_len, random): + if rand_len > 0: + return random.next() % 8589934609 % rand_len + return 0 + + def rnd_data(self, buf_size, buf, last_hash, random): + rand_len = self.rnd_data_len(buf_size, last_hash, random) + + rnd_data_buf = rand_bytes(rand_len) + + if buf_size == 0: + return rnd_data_buf + else: + if rand_len > 0: + start_pos = self.rnd_start_pos(rand_len, random) + return rnd_data_buf[:start_pos] + buf + rnd_data_buf[start_pos:] + else: + return buf + + def pack_client_data(self, buf): + buf = self.encryptor.encrypt(buf) + data = self.rnd_data(len(buf), buf, self.last_client_hash, self.random_client) + data_len = len(data) + 8 + mac_key = self.user_key + struct.pack(' 0xFF000000: + self.server_info.data.local_client_id = b'' + if not self.server_info.data.local_client_id: + self.server_info.data.local_client_id = rand_bytes(4) + logging.debug("local_client_id %s" % (binascii.hexlify(self.server_info.data.local_client_id),)) + self.server_info.data.connection_id = struct.unpack(' self.unit_len: + ret += 
self.pack_client_data(buf[:self.unit_len]) + buf = buf[self.unit_len:] + ret += self.pack_client_data(buf) + return ret + + def client_post_decrypt(self, buf): + if self.raw_trans: + return buf + self.recv_buf += buf + out_buf = b'' + while len(self.recv_buf) > 4: + mac_key = self.user_key + struct.pack('= 4096: + self.raw_trans = True + self.recv_buf = b'' + raise Exception('client_post_decrypt data error') + + if length + 4 > len(self.recv_buf): + break + + server_hash = hmac.new(mac_key, self.recv_buf[:length + 2], self.hashfunc).digest() + if server_hash[:2] != self.recv_buf[length + 2 : length + 4]: + logging.info('%s: checksum error, data %s' % (self.no_compatible_method, binascii.hexlify(self.recv_buf[:length]))) + self.raw_trans = True + self.recv_buf = b'' + raise Exception('client_post_decrypt data uncorrect checksum') + + pos = 2 + if data_len > 0 and rand_len > 0: + pos = 2 + self.rnd_start_pos(rand_len, self.random_server) + out_buf += self.encryptor.decrypt(self.recv_buf[pos : data_len + pos]) + self.last_server_hash = server_hash + if self.recv_id == 1: + self.server_info.tcp_mss = struct.unpack(' self.unit_len: + ret += self.pack_server_data(buf[:self.unit_len]) + buf = buf[self.unit_len:] + ret += self.pack_server_data(buf) + return ret + + def server_post_decrypt(self, buf): + if self.raw_trans: + return (buf, False) + self.recv_buf += buf + out_buf = b'' + sendback = False + + if not self.has_recv_header: + if len(self.recv_buf) >= 12 or len(self.recv_buf) in [7, 8]: + recv_len = min(len(self.recv_buf), 12) + mac_key = self.server_info.recv_iv + self.server_info.key + md5data = hmac.new(mac_key, self.recv_buf[:4], self.hashfunc).digest() + if md5data[:recv_len - 4] != self.recv_buf[4:recv_len]: + return self.not_match_return(self.recv_buf) + + if len(self.recv_buf) < 12 + 24: + return (b'', False) + + self.last_client_hash = md5data + uid = struct.unpack(' self.max_time_dif: + logging.info('%s: wrong timestamp, time_dif %d, data %s' % 
(self.no_compatible_method, time_dif, binascii.hexlify(head))) + return self.not_match_return(self.recv_buf) + elif self.server_info.data.insert(self.user_id, client_id, connection_id): + self.has_recv_header = True + self.client_id = client_id + self.connection_id = connection_id + else: + logging.info('%s: auth fail, data %s' % (self.no_compatible_method, binascii.hexlify(out_buf))) + return self.not_match_return(self.recv_buf) + + self.encryptor = encrypt.Encryptor(to_bytes(base64.b64encode(self.user_key)) + to_bytes(base64.b64encode(self.last_client_hash)), 'rc4') + self.recv_buf = self.recv_buf[36:] + self.has_recv_header = True + sendback = True + + while len(self.recv_buf) > 4: + mac_key = self.user_key + struct.pack('= 4096: + self.raw_trans = True + self.recv_buf = b'' + if self.recv_id == 0: + logging.info(self.no_compatible_method + ': over size') + return (b'E'*2048, False) + else: + raise Exception('server_post_decrype data error') + + if length + 4 > len(self.recv_buf): + break + + client_hash = hmac.new(mac_key, self.recv_buf[:length + 2], self.hashfunc).digest() + if client_hash[:2] != self.recv_buf[length + 2 : length + 4]: + logging.info('%s: checksum error, data %s' % (self.no_compatible_method, binascii.hexlify(self.recv_buf[:length]))) + self.raw_trans = True + self.recv_buf = b'' + if self.recv_id == 0: + return (b'E'*2048, False) + else: + raise Exception('server_post_decrype data uncorrect checksum') + + self.recv_id = (self.recv_id + 1) & 0xFFFFFFFF + pos = 2 + if data_len > 0 and rand_len > 0: + pos = 2 + self.rnd_start_pos(rand_len, self.random_client) + out_buf += self.encryptor.decrypt(self.recv_buf[pos : data_len + pos]) + self.last_client_hash = client_hash + self.recv_buf = self.recv_buf[length + 4:] + if data_len == 0: + sendback = True + + if out_buf: + self.server_info.data.update(self.user_id, self.client_id, self.connection_id) + return (out_buf, sendback) + + def client_udp_pre_encrypt(self, buf): + if self.user_key is None: + 
if b':' in to_bytes(self.server_info.protocol_param): + try: + items = to_bytes(self.server_info.protocol_param).split(':') + self.user_key = self.hashfunc(items[1]).digest() + self.user_id = struct.pack('= 1440: + return 0 + random.init_from_bin_len(last_hash, buf_size) + pos = bisect.bisect_left(self.data_size_list, buf_size + self.server_info.overhead) + final_pos = pos + random.next() % (len(self.data_size_list)) + if final_pos < len(self.data_size_list): + return self.data_size_list[final_pos] - buf_size - self.server_info.overhead + + pos = bisect.bisect_left(self.data_size_list2, buf_size + self.server_info.overhead) + final_pos = pos + random.next() % (len(self.data_size_list2)) + if final_pos < len(self.data_size_list2): + return self.data_size_list2[final_pos] - buf_size - self.server_info.overhead + if final_pos < pos + len(self.data_size_list2) - 1: + return 0 + + if buf_size > 1300: + return random.next() % 31 + if buf_size > 900: + return random.next() % 127 + if buf_size > 400: + return random.next() % 521 + return random.next() % 1021 + + + + + +class auth_chain_c(auth_chain_b): + def __init__(self, method): + super(auth_chain_c, self).__init__(method) + self.salt = b"auth_chain_c" + self.no_compatible_method = 'auth_chain_c' + self.data_size_list0 = [] + + def init_data_size(self, key): + if self.data_size_list0: + self.data_size_list0 = [] + random = xorshift128plus() + random.init_from_bin(key) + list_len = random.next() % (8 + 16) + (4 + 8) + for i in range(0, list_len): + self.data_size_list0.append((int)(random.next() % 2340 % 2040 % 1440)) + self.data_size_list0.sort() + + def set_server_info(self, server_info): + self.server_info = server_info + try: + max_client = int(server_info.protocol_param.split('#')[0]) + except: + max_client = 64 + self.server_info.data.set_max_client(max_client) + self.init_data_size(self.server_info.key) + + def rnd_data_len(self, buf_size, last_hash, random): + other_data_size = buf_size + 
self.server_info.overhead + random.init_from_bin_len(last_hash, buf_size) + if other_data_size >= self.data_size_list0[-1]: + if other_data_size >= 1440: + return 0 + if other_data_size > 1300: + return random.next() % 31 + if other_data_size > 900: + return random.next() % 127 + if other_data_size > 400: + return random.next() % 521 + return random.next() % 1021 + + pos = bisect.bisect_left(self.data_size_list0, other_data_size) + final_pos = pos + random.next() % (len(self.data_size_list0) - pos) + return self.data_size_list0[final_pos] - other_data_size + + +class auth_chain_d(auth_chain_b): + def __init__(self, method): + super(auth_chain_d, self).__init__(method) + self.salt = b"auth_chain_d" + self.no_compatible_method = 'auth_chain_d' + self.data_size_list0 = [] + + def check_and_patch_data_size(self, random): + if self.data_size_list0[-1] < 1300 and len(self.data_size_list0) < 64: + self.data_size_list0.append((int)(random.next() % 2340 % 2040 % 1440)) + self.check_and_patch_data_size(random) + + def init_data_size(self, key): + if self.data_size_list0: + self.data_size_list0 = [] + random = xorshift128plus() + random.init_from_bin(key) + list_len = random.next() % (8 + 16) + (4 + 8) + for i in range(0, list_len): + self.data_size_list0.append((int)(random.next() % 2340 % 2040 % 1440)) + self.data_size_list0.sort() + old_len = len(self.data_size_list0) + self.check_and_patch_data_size(random) + if old_len != len(self.data_size_list0): + self.data_size_list0.sort() + + def set_server_info(self, server_info): + self.server_info = server_info + try: + max_client = int(server_info.protocol_param.split('#')[0]) + except: + max_client = 64 + self.server_info.data.set_max_client(max_client) + self.init_data_size(self.server_info.key) + + def rnd_data_len(self, buf_size, last_hash, random): + other_data_size = buf_size + self.server_info.overhead + if other_data_size >= self.data_size_list0[-1]: + return 0 + + random.init_from_bin_len(last_hash, buf_size) + pos = 
bisect.bisect_left(self.data_size_list0, other_data_size) + final_pos = pos + random.next() % (len(self.data_size_list0) - pos) + return self.data_size_list0[final_pos] - other_data_size + + +class auth_chain_e(auth_chain_d): + def __init__(self, method): + super(auth_chain_e, self).__init__(method) + self.salt = b"auth_chain_e" + self.no_compatible_method = 'auth_chain_e' + + def rnd_data_len(self, buf_size, last_hash, random): + random.init_from_bin_len(last_hash, buf_size) + other_data_size = buf_size + self.server_info.overhead + if other_data_size >= self.data_size_list0[-1]: + return 0 + + pos = bisect.bisect_left(self.data_size_list0, other_data_size) + return self.data_size_list0[pos] - other_data_size + + +class auth_chain_f(auth_chain_e): + def __init__(self, method): + super(auth_chain_f, self).__init__(method) + self.salt = b"auth_chain_f" + self.no_compatible_method = 'auth_chain_f' + + def set_server_info(self, server_info): + self.server_info = server_info + try: + max_client = int(server_info.protocol_param.split('#')[0]) + except: + max_client = 64 + try: + self.key_change_interval = int(server_info.protocol_param.split('#')[1]) + except: + self.key_change_interval = 60 * 60 * 24 + self.key_change_datetime_key = int(int(time.time()) / self.key_change_interval) + self.key_change_datetime_key_bytes = [] + for i in range(7, -1, -1): + self.key_change_datetime_key_bytes.append((self.key_change_datetime_key >> (8 * i)) & 0xFF) + self.server_info.data.set_max_client(max_client) + self.init_data_size(self.server_info.key) + + def init_data_size(self, key): + if self.data_size_list0: + self.data_size_list0 = [] + random = xorshift128plus() + new_key = bytearray(key) + for i in range(0, 8): + new_key[i] ^= self.key_change_datetime_key_bytes[i] + random.init_from_bin(new_key) + list_len = random.next() % (8 + 16) + (4 + 8) + for i in range(0, list_len): + self.data_size_list0.append(int(random.next() % 2340 % 2040 % 1440)) + self.data_size_list0.sort() + 
old_len = len(self.data_size_list0) + self.check_and_patch_data_size(random) + if old_len != len(self.data_size_list0): + self.data_size_list0.sort() diff --git a/shadowsocks/obfsplugin/http_simple.py b/shadowsocks/obfsplugin/http_simple.py new file mode 100644 index 0000000..e4dc577 --- /dev/null +++ b/shadowsocks/obfsplugin/http_simple.py @@ -0,0 +1,351 @@ +#!/usr/bin/env python +# +# Copyright 2015-2015 breakwa11 +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from __future__ import absolute_import, division, print_function, \ + with_statement + +import os +import sys +import hashlib +import logging +import binascii +import struct +import base64 +import datetime +import random + +from shadowsocks import common +from shadowsocks.obfsplugin import plain +from shadowsocks.common import to_bytes, to_str, ord, chr + +def create_http_simple_obfs(method): + return http_simple(method) + +def create_http_post_obfs(method): + return http_post(method) + +def create_random_head_obfs(method): + return random_head(method) + +obfs_map = { + 'http_simple': (create_http_simple_obfs,), + 'http_simple_compatible': (create_http_simple_obfs,), + 'http_post': (create_http_post_obfs,), + 'http_post_compatible': (create_http_post_obfs,), + 'random_head': (create_random_head_obfs,), + 'random_head_compatible': (create_random_head_obfs,), +} + +def match_begin(str1, str2): + if len(str1) >= len(str2): + if str1[:len(str2)] == str2: + return True + return False + +class 
http_simple(plain.plain): + def __init__(self, method): + self.method = method + self.has_sent_header = False + self.has_recv_header = False + self.host = None + self.port = 0 + self.recv_buffer = b'' + self.user_agent = [b"Mozilla/5.0 (Windows NT 6.3; WOW64; rv:40.0) Gecko/20100101 Firefox/40.0", + b"Mozilla/5.0 (Windows NT 6.3; WOW64; rv:40.0) Gecko/20100101 Firefox/44.0", + b"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36", + b"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.11 (KHTML, like Gecko) Ubuntu/11.10 Chromium/27.0.1453.93 Chrome/27.0.1453.93 Safari/537.36", + b"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:35.0) Gecko/20100101 Firefox/35.0", + b"Mozilla/5.0 (compatible; WOW64; MSIE 10.0; Windows NT 6.2)", + b"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27", + b"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.3; Trident/7.0; .NET4.0E; .NET4.0C)", + b"Mozilla/5.0 (Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko", + b"Mozilla/5.0 (Linux; Android 4.4; Nexus 5 Build/BuildID) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/30.0.0.0 Mobile Safari/537.36", + b"Mozilla/5.0 (iPad; CPU OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3", + b"Mozilla/5.0 (iPhone; CPU iPhone OS 5_0 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9A334 Safari/7534.48.3"] + + def encode_head(self, buf): + hexstr = binascii.hexlify(buf) + chs = [] + for i in range(0, len(hexstr), 2): + chs.append(b"%" + hexstr[i:i+2]) + return b''.join(chs) + + def client_encode(self, buf): + if self.has_sent_header: + return buf + head_size = len(self.server_info.iv) + self.server_info.head_len + if len(buf) - head_size > 64: + headlen = head_size + random.randint(0, 64) + else: + headlen = len(buf) + headdata = buf[:headlen] + buf = buf[headlen:] + port = b'' + if self.server_info.port != 
80: + port = b':' + to_bytes(str(self.server_info.port)) + body = None + hosts = (self.server_info.obfs_param or self.server_info.host) + pos = hosts.find("#") + if pos >= 0: + body = hosts[pos + 1:].replace("\\n", "\r\n") + hosts = hosts[:pos] + hosts = hosts.split(',') + host = random.choice(hosts) + http_head = b"GET /" + self.encode_head(headdata) + b" HTTP/1.1\r\n" + http_head += b"Host: " + to_bytes(host) + port + b"\r\n" + if body: + http_head += body + "\r\n\r\n" + else: + http_head += b"User-Agent: " + random.choice(self.user_agent) + b"\r\n" + http_head += b"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Language: en-US,en;q=0.8\r\nAccept-Encoding: gzip, deflate\r\nDNT: 1\r\nConnection: keep-alive\r\n\r\n" + self.has_sent_header = True + return http_head + buf + + def client_decode(self, buf): + if self.has_recv_header: + return (buf, False) + pos = buf.find(b'\r\n\r\n') + if pos >= 0: + self.has_recv_header = True + return (buf[pos + 4:], False) + else: + return (b'', False) + + def server_encode(self, buf): + if self.has_sent_header: + return buf + + header = b'HTTP/1.1 200 OK\r\nConnection: keep-alive\r\nContent-Encoding: gzip\r\nContent-Type: text/html\r\nDate: ' + header += to_bytes(datetime.datetime.now().strftime('%a, %d %b %Y %H:%M:%S GMT')) + header += b'\r\nServer: nginx\r\nVary: Accept-Encoding\r\n\r\n' + self.has_sent_header = True + return header + buf + + def get_data_from_http_header(self, buf): + ret_buf = b'' + lines = buf.split(b'\r\n') + if lines and len(lines) > 1: + hex_items = lines[0].split(b'%') + if hex_items and len(hex_items) > 1: + for index in range(1, len(hex_items)): + if len(hex_items[index]) < 2: + ret_buf += binascii.unhexlify('0' + hex_items[index]) + break + elif len(hex_items[index]) > 2: + ret_buf += binascii.unhexlify(hex_items[index][:2]) + break + else: + ret_buf += binascii.unhexlify(hex_items[index]) + return ret_buf + return b'' + + def get_host_from_http_header(self, buf): + 
ret_buf = b'' + lines = buf.split(b'\r\n') + if lines and len(lines) > 1: + for line in lines: + if match_begin(line, b"Host: "): + return common.to_str(line[6:]) + + def not_match_return(self, buf): + self.has_sent_header = True + self.has_recv_header = True + if self.method == 'http_simple': + return (b'E'*2048, False, False) + return (buf, True, False) + + def error_return(self, buf): + self.has_sent_header = True + self.has_recv_header = True + return (b'E'*2048, False, False) + + def server_decode(self, buf): + if self.has_recv_header: + return (buf, True, False) + + self.recv_buffer += buf + buf = self.recv_buffer + if len(buf) > 10: + if match_begin(buf, b'GET /') or match_begin(buf, b'POST /'): + if len(buf) > 65536: + self.recv_buffer = None + logging.warn('http_simple: over size') + return self.not_match_return(buf) + else: #not http header, run on original protocol + self.recv_buffer = None + logging.debug('http_simple: not match begin') + return self.not_match_return(buf) + else: + return (b'', True, False) + + if b'\r\n\r\n' in buf: + datas = buf.split(b'\r\n\r\n', 1) + ret_buf = self.get_data_from_http_header(buf) + host = self.get_host_from_http_header(buf) + if host and self.server_info.obfs_param: + pos = host.find(":") + if pos >= 0: + host = host[:pos] + hosts = self.server_info.obfs_param.split(b',') + if common.to_bytes(host) not in hosts: + return self.not_match_return(buf) + if len(ret_buf) < 4: + return self.error_return(buf) + if len(datas) > 1: + ret_buf += datas[1] + if len(ret_buf) >= 13: + self.has_recv_header = True + return (ret_buf, True, False, host) + return self.not_match_return(buf) + else: + return (b'', True, False) + +class http_post(http_simple): + def __init__(self, method): + super(http_post, self).__init__(method) + + def boundary(self): + return to_bytes(''.join([random.choice("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789") for i in range(32)])) + + def client_encode(self, buf): + if 
self.has_sent_header: + return buf + head_size = len(self.server_info.iv) + self.server_info.head_len + if len(buf) - head_size > 64: + headlen = head_size + random.randint(0, 64) + else: + headlen = len(buf) + headdata = buf[:headlen] + buf = buf[headlen:] + port = b'' + if self.server_info.port != 80: + port = b':' + to_bytes(str(self.server_info.port)) + body = None + hosts = (self.server_info.obfs_param or self.server_info.host) + pos = hosts.find("#") + if pos >= 0: + body = hosts[pos + 1:].replace("\\n", "\r\n") + hosts = hosts[:pos] + hosts = hosts.split(',') + host = random.choice(hosts) + http_head = b"POST /" + self.encode_head(headdata) + b" HTTP/1.1\r\n" + http_head += b"Host: " + to_bytes(host) + port + b"\r\n" + if body: + http_head += body + "\r\n\r\n" + else: + http_head += b"User-Agent: " + random.choice(self.user_agent) + b"\r\n" + http_head += b"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Language: en-US,en;q=0.8\r\nAccept-Encoding: gzip, deflate\r\n" + http_head += b"Content-Type: multipart/form-data; boundary=" + self.boundary() + b"\r\nDNT: 1\r\n" + http_head += b"Connection: keep-alive\r\n\r\n" + self.has_sent_header = True + return http_head + buf + + def not_match_return(self, buf): + self.has_sent_header = True + self.has_recv_header = True + if self.method == 'http_post': + return (b'E'*2048, False, False) + return (buf, True, False) + + def server_decode(self, buf): + if self.has_recv_header: + return (buf, True, False) + + self.recv_buffer += buf + buf = self.recv_buffer + if len(buf) > 10: + if match_begin(buf, b'GET ') or match_begin(buf, b'POST '): + if len(buf) > 65536: + self.recv_buffer = None + logging.warn('http_post: over size') + return self.not_match_return(buf) + else: #not http header, run on original protocol + self.recv_buffer = None + logging.debug('http_post: not match begin') + return self.not_match_return(buf) + else: + return (b'', True, False) + + if b'\r\n\r\n' in buf: + datas 
= buf.split(b'\r\n\r\n', 1) + ret_buf = self.get_data_from_http_header(buf) + host = self.get_host_from_http_header(buf) + if host and self.server_info.obfs_param: + pos = host.find(b":") + if pos >= 0: + host = host[:pos] + hosts = self.server_info.obfs_param.split(b',') + if common.to_bytes(host) not in hosts: + return self.not_match_return(buf) + if len(datas) > 1: + ret_buf += datas[1] + if len(ret_buf) >= 7: + self.has_recv_header = True + return (ret_buf, True, False, host) + return self.not_match_return(buf) + else: + return (b'', True, False) + +class random_head(plain.plain): + def __init__(self, method): + self.method = method + self.has_sent_header = False + self.has_recv_header = False + self.raw_trans_sent = False + self.raw_trans_recv = False + self.send_buffer = b'' + + def client_encode(self, buf): + if self.raw_trans_sent: + return buf + self.send_buffer += buf + if not self.has_sent_header: + self.has_sent_header = True + data = os.urandom(common.ord(os.urandom(1)[0]) % 96 + 4) + crc = (0xffffffff - binascii.crc32(data)) & 0xffffffff + return data + struct.pack('= len(str2): + if str1[:len(str2)] == str2: + return True + return False + +class obfs_auth_data(object): + def __init__(self): + self.client_data = lru_cache.LRUCache(60 * 5) + self.client_id = os.urandom(32) + self.startup_time = int(time.time() - 60 * 30) & 0xFFFFFFFF + +class tls_ticket_auth(plain.plain): + def __init__(self, method): + self.method = method + self.handshake_status = 0 + self.send_buffer = b'' + self.recv_buffer = b'' + self.client_id = b'' + self.max_time_dif = 60 * 60 * 24 # time dif (second) setting + self.tls_version = b'\x03\x03' + self.overhead = 5 + + def init_data(self): + return obfs_auth_data() + + def get_overhead(self, direction): # direction: true for c->s false for s->c + return self.overhead + + def sni(self, url): + url = common.to_bytes(url) + data = b"\x00" + struct.pack('>H', len(url)) + url + data = b"\x00\x00" + struct.pack('>H', len(data) + 2) + 
struct.pack('>H', len(data)) + data + return data + + def pack_auth_data(self, client_id): + utc_time = int(time.time()) & 0xFFFFFFFF + data = struct.pack('>I', utc_time) + os.urandom(18) + data += hmac.new(self.server_info.key + client_id, data, hashlib.sha1).digest()[:10] + return data + + def client_encode(self, buf): + if self.handshake_status == -1: + return buf + if self.handshake_status == 8: + ret = b'' + while len(buf) > 2048: + size = min(struct.unpack('>H', os.urandom(2))[0] % 4096 + 100, len(buf)) + ret += b"\x17" + self.tls_version + struct.pack('>H', size) + buf[:size] + buf = buf[size:] + if len(buf) > 0: + ret += b"\x17" + self.tls_version + struct.pack('>H', len(buf)) + buf + return ret + if len(buf) > 0: + self.send_buffer += b"\x17" + self.tls_version + struct.pack('>H', len(buf)) + buf + if self.handshake_status == 0: + self.handshake_status = 1 + data = self.tls_version + self.pack_auth_data(self.server_info.data.client_id) + b"\x20" + self.server_info.data.client_id + binascii.unhexlify(b"001cc02bc02fcca9cca8cc14cc13c00ac014c009c013009c0035002f000a" + b"0100") + ext = binascii.unhexlify(b"ff01000100") + host = self.server_info.obfs_param or self.server_info.host + if host and host[-1] in string.digits: + host = '' + hosts = host.split(',') + host = random.choice(hosts) + ext += self.sni(host) + ext += b"\x00\x17\x00\x00" + ext += b"\x00\x23\x00\xd0" + os.urandom(208) # ticket + ext += binascii.unhexlify(b"000d001600140601060305010503040104030301030302010203") + ext += binascii.unhexlify(b"000500050100000000") + ext += binascii.unhexlify(b"00120000") + ext += binascii.unhexlify(b"75500000") + ext += binascii.unhexlify(b"000b00020100") + ext += binascii.unhexlify(b"000a0006000400170018") + data += struct.pack('>H', len(ext)) + ext + data = b"\x01\x00" + struct.pack('>H', len(data)) + data + data = b"\x16\x03\x01" + struct.pack('>H', len(data)) + data + return data + elif self.handshake_status == 1 and len(buf) == 0: + data = b"\x14" + 
self.tls_version + b"\x00\x01\x01" #ChangeCipherSpec + data += b"\x16" + self.tls_version + b"\x00\x20" + os.urandom(22) #Finished + data += hmac.new(self.server_info.key + self.server_info.data.client_id, data, hashlib.sha1).digest()[:10] + ret = data + self.send_buffer + self.send_buffer = b'' + self.handshake_status = 8 + return ret + return b'' + + def client_decode(self, buf): + if self.handshake_status == -1: + return (buf, False) + + if self.handshake_status == 8: + ret = b'' + self.recv_buffer += buf + while len(self.recv_buffer) > 5: + if ord(self.recv_buffer[0]) != 0x17: + logging.info("data = %s" % (binascii.hexlify(self.recv_buffer))) + raise Exception('server_decode appdata error') + size = struct.unpack('>H', self.recv_buffer[3:5])[0] + if len(self.recv_buffer) < size + 5: + break + buf = self.recv_buffer[5:size+5] + ret += buf + self.recv_buffer = self.recv_buffer[size+5:] + return (ret, False) + + if len(buf) < 11 + 32 + 1 + 32: + raise Exception('client_decode data error') + verify = buf[11:33] + if hmac.new(self.server_info.key + self.server_info.data.client_id, verify, hashlib.sha1).digest()[:10] != buf[33:43]: + raise Exception('client_decode data error') + if hmac.new(self.server_info.key + self.server_info.data.client_id, buf[:-10], hashlib.sha1).digest()[:10] != buf[-10:]: + raise Exception('client_decode data error') + return (b'', True) + + def server_encode(self, buf): + if self.handshake_status == -1: + return buf + if (self.handshake_status & 8) == 8: + ret = b'' + while len(buf) > 2048: + size = min(struct.unpack('>H', os.urandom(2))[0] % 4096 + 100, len(buf)) + ret += b"\x17" + self.tls_version + struct.pack('>H', size) + buf[:size] + buf = buf[size:] + if len(buf) > 0: + ret += b"\x17" + self.tls_version + struct.pack('>H', len(buf)) + buf + return ret + self.handshake_status |= 8 + data = self.tls_version + self.pack_auth_data(self.client_id) + b"\x20" + self.client_id + binascii.unhexlify(b"c02f000005ff01000100") + data = 
b"\x02\x00" + struct.pack('>H', len(data)) + data #server hello + data = b"\x16" + self.tls_version + struct.pack('>H', len(data)) + data + if random.randint(0, 8) < 1: + ticket = os.urandom((struct.unpack('>H', os.urandom(2))[0] % 164) * 2 + 64) + ticket = struct.pack('>H', len(ticket) + 4) + b"\x04\x00" + struct.pack('>H', len(ticket)) + ticket + data += b"\x16" + self.tls_version + ticket #New session ticket + data += b"\x14" + self.tls_version + b"\x00\x01\x01" #ChangeCipherSpec + finish_len = random.choice([32, 40]) + data += b"\x16" + self.tls_version + struct.pack('>H', finish_len) + os.urandom(finish_len - 10) #Finished + data += hmac.new(self.server_info.key + self.client_id, data, hashlib.sha1).digest()[:10] + if buf: + data += self.server_encode(buf) + return data + + def decode_error_return(self, buf): + self.handshake_status = -1 + if self.overhead > 0: + self.server_info.overhead -= self.overhead + self.overhead = 0 + if self.method == 'tls1.2_ticket_auth' or self.method == 'tls1.2_ticket_fastauth': + return (b'E'*2048, False, False) + return (buf, True, False) + + def server_decode(self, buf): + if self.handshake_status == -1: + return (buf, True, False) + + if (self.handshake_status & 4) == 4: + ret = b'' + self.recv_buffer += buf + while len(self.recv_buffer) > 5: + if ord(self.recv_buffer[0]) != 0x17 or ord(self.recv_buffer[1]) != 0x3 or ord(self.recv_buffer[2]) != 0x3: + logging.info("data = %s" % (binascii.hexlify(self.recv_buffer))) + raise Exception('server_decode appdata error') + size = struct.unpack('>H', self.recv_buffer[3:5])[0] + if len(self.recv_buffer) < size + 5: + break + ret += self.recv_buffer[5:size+5] + self.recv_buffer = self.recv_buffer[size+5:] + return (ret, True, False) + + if (self.handshake_status & 1) == 1: + self.recv_buffer += buf + buf = self.recv_buffer + verify = buf + if len(buf) < 11: + raise Exception('server_decode data error') + if not match_begin(buf, b"\x14" + self.tls_version + b"\x00\x01\x01"): 
#ChangeCipherSpec + raise Exception('server_decode data error') + buf = buf[6:] + if not match_begin(buf, b"\x16" + self.tls_version + b"\x00"): #Finished + raise Exception('server_decode data error') + verify_len = struct.unpack('>H', buf[3:5])[0] + 1 # 11 - 10 + if len(verify) < verify_len + 10: + return (b'', False, False) + if hmac.new(self.server_info.key + self.client_id, verify[:verify_len], hashlib.sha1).digest()[:10] != verify[verify_len:verify_len+10]: + raise Exception('server_decode data error') + self.recv_buffer = verify[verify_len + 10:] + status = self.handshake_status + self.handshake_status |= 4 + ret = self.server_decode(b'') + return ret; + + #raise Exception("handshake data = %s" % (binascii.hexlify(buf))) + self.recv_buffer += buf + buf = self.recv_buffer + ogn_buf = buf + if len(buf) < 3: + return (b'', False, False) + if not match_begin(buf, b'\x16\x03\x01'): + return self.decode_error_return(ogn_buf) + buf = buf[3:] + header_len = struct.unpack('>H', buf[:2])[0] + if header_len > len(buf) - 2: + return (b'', False, False) + + self.recv_buffer = self.recv_buffer[header_len + 5:] + self.handshake_status = 1 + buf = buf[2:header_len + 2] + if not match_begin(buf, b'\x01\x00'): #client hello + logging.info("tls_auth not client hello message") + return self.decode_error_return(ogn_buf) + buf = buf[2:] + if struct.unpack('>H', buf[:2])[0] != len(buf) - 2: + logging.info("tls_auth wrong message size") + return self.decode_error_return(ogn_buf) + buf = buf[2:] + if not match_begin(buf, self.tls_version): + logging.info("tls_auth wrong tls version") + return self.decode_error_return(ogn_buf) + buf = buf[2:] + verifyid = buf[:32] + buf = buf[32:] + sessionid_len = ord(buf[0]) + if sessionid_len < 32: + logging.info("tls_auth wrong sessionid_len") + return self.decode_error_return(ogn_buf) + sessionid = buf[1:sessionid_len + 1] + buf = buf[sessionid_len+1:] + self.client_id = sessionid + sha1 = hmac.new(self.server_info.key + sessionid, verifyid[:22], 
hashlib.sha1).digest()[:10] + utc_time = struct.unpack('>I', verifyid[:4])[0] + time_dif = common.int32((int(time.time()) & 0xffffffff) - utc_time) + if self.server_info.obfs_param: + try: + self.max_time_dif = int(self.server_info.obfs_param) + except: + pass + if self.max_time_dif > 0 and (time_dif < -self.max_time_dif or time_dif > self.max_time_dif \ + or common.int32(utc_time - self.server_info.data.startup_time) < -self.max_time_dif / 2): + logging.info("tls_auth wrong time") + return self.decode_error_return(ogn_buf) + if sha1 != verifyid[22:]: + logging.info("tls_auth wrong sha1") + return self.decode_error_return(ogn_buf) + if self.server_info.data.client_data.get(verifyid[:22]): + logging.info("replay attack detect, id = %s" % (binascii.hexlify(verifyid))) + return self.decode_error_return(ogn_buf) + self.server_info.data.client_data.sweep() + self.server_info.data.client_data[verifyid[:22]] = sessionid + if len(self.recv_buffer) >= 11: + ret = self.server_decode(b'') + return (ret[0], True, True) + # (buffer_to_recv, is_need_decrypt, is_need_to_encode_and_send_back) + + buf = buf[48:] + + host_name = b'' + for index in range(len(buf)): + if index + 4 < len(buf): + if buf[index:index + 4] == b"\x00\x17\x00\x00": + if buf[:index] != '': + host_name = buf[:index] + host_name = host_name.decode('utf-8') + + return (b'', False, True, host_name) diff --git a/shadowsocks/obfsplugin/plain.py b/shadowsocks/obfsplugin/plain.py new file mode 100644 index 0000000..2a3c2bb --- /dev/null +++ b/shadowsocks/obfsplugin/plain.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python +# +# Copyright 2015-2015 breakwa11 +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from __future__ import absolute_import, division, print_function, \ + with_statement + +import os +import sys +import hashlib +import logging + +from shadowsocks.common import ord + +def create_obfs(method): + return plain(method) + +obfs_map = { + 'plain': (create_obfs,), + 'origin': (create_obfs,), +} + +class plain(object): + def __init__(self, method): + self.method = method + self.server_info = None + + def init_data(self): + return b'' + + def get_overhead(self, direction): # direction: true for c->s false for s->c + return 0 + + def get_server_info(self): + return self.server_info + + def set_server_info(self, server_info): + self.server_info = server_info + + def client_pre_encrypt(self, buf): + return buf + + def client_encode(self, buf): + return buf + + def client_decode(self, buf): + # (buffer_to_recv, is_need_to_encode_and_send_back) + return (buf, False) + + def client_post_decrypt(self, buf): + return buf + + def server_pre_encrypt(self, buf): + return buf + + def server_encode(self, buf): + return buf + + def server_decode(self, buf): + # (buffer_to_recv, is_need_decrypt, is_need_to_encode_and_send_back) + return (buf, True, False) + + def server_post_decrypt(self, buf): + return (buf, False) + + def client_udp_pre_encrypt(self, buf): + return buf + + def client_udp_post_decrypt(self, buf): + return buf + + def server_udp_pre_encrypt(self, buf, uid): + return buf + + def server_udp_post_decrypt(self, buf): + return (buf, None) + + def dispose(self): + pass + + def get_head_size(self, buf, def_value): + if len(buf) < 2: + return def_value 
+ head_type = ord(buf[0]) & 0x7 + if head_type == 1: + return 7 + if head_type == 4: + return 19 + if head_type == 3: + return 4 + ord(buf[1]) + return def_value diff --git a/shadowsocks/obfsplugin/simple_obfs_http.py b/shadowsocks/obfsplugin/simple_obfs_http.py new file mode 100644 index 0000000..5d19bb0 --- /dev/null +++ b/shadowsocks/obfsplugin/simple_obfs_http.py @@ -0,0 +1,171 @@ +#!/usr/bin/env python +# +# Copyright 2015-2015 breakwa11 +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from __future__ import absolute_import, division, print_function, \ + with_statement + +import os +import sys +import hashlib +import logging +import binascii +import struct +import base64 +import datetime +import random + +from shadowsocks import common +from shadowsocks.obfsplugin import plain +from shadowsocks.common import to_bytes, to_str, ord, chr + +def create_simple_obfs_http_obfs(method): + return simple_obfs_http(method) + +obfs_map = { + 'simple_obfs_http': (create_simple_obfs_http_obfs,), + 'simple_obfs_http_compatible': (create_simple_obfs_http_obfs,), +} + +def match_begin(str1, str2): + if len(str1) >= len(str2): + if str1[:len(str2)] == str2: + return True + return False + +class simple_obfs_http(plain.plain): + def __init__(self, method): + self.method = method + self.has_sent_header = False + self.has_recv_header = False + self.host = None + self.port = 0 + self.recv_buffer = b'' + + self.curl_version = b"7." + common.to_bytes(str(random.randint(0, 51))) + b"." 
    def encode_head(self, buf):
        """Percent-encode *buf*: every input byte becomes b'%XX' (two hex digits)."""
        hexstr = binascii.hexlify(buf)
        chs = []
        for i in range(0, len(hexstr), 2):
            chs.append(b"%" + hexstr[i:i+2])
        return b''.join(chs)

    def client_encode(self, buf):
        # Client side is unfinished: the unconditional raise makes everything
        # below it dead code (kept for reference).
        raise Exception('Need to finish')
        if self.has_sent_header:
            return buf
        port = b''
        if self.server_info.port != 80:
            port = b':' + to_bytes(str(self.server_info.port))
        hosts = (self.server_info.obfs_param or self.server_info.host)
        pos = hosts.find("#")
        if pos >= 0:
            # text after '#' in obfs_param would become extra header lines
            # ("\\n" -> CRLF); note `body` is currently never used below
            body = hosts[pos + 1:].replace("\\n", "\r\n")
            hosts = hosts[:pos]
        hosts = hosts.split(',')
        host = random.choice(hosts)
        http_head = b"GET /" + b" HTTP/1.1\r\n"
        http_head += b"Host: " + to_bytes(host) + port + b"\r\n"
        http_head += b"User-Agent: curl/" + self.curl_version + b"\r\n"
        http_head += b"Upgrade: websocket\r\n"
        http_head += b"Connection: Upgrade\r\n"
        http_head += b"Sec-WebSocket-Key: " + common.to_bytes(common.random_base64_str(64)) + b"\r\n"
        # NOTE(review): len(buf) is an int; concatenating it to bytes would
        # raise TypeError if this dead branch were ever revived.
        http_head += b"Content-Length: " + len(buf) + b"\r\n"
        http_head += b"\r\n"
        self.has_sent_header = True
        return http_head + buf

    def client_decode(self, buf):
        # Unfinished (dead code below the raise): would strip the server's
        # HTTP response header once, then pass data through unchanged.
        raise Exception('Need to finish')
        if self.has_recv_header:
            return (buf, False)
        pos = buf.find(b'\r\n\r\n')
        if pos >= 0:
            self.has_recv_header = True
            return (buf[pos + 4:], False)
        else:
            return (b'', False)

    def server_encode(self, buf):
        """Prefix the first server payload with a fake nginx
        '101 Switching Protocols' websocket-upgrade response; every later
        call passes *buf* through untouched."""
        if self.has_sent_header:
            return buf

        header = b'HTTP/1.1 101 Switching Protocols\r\n'
        header += b'Server: nginx/' + self.nginx_version + b'\r\n'
        header += b'Date: ' + to_bytes(datetime.datetime.now().strftime('%a, %d %b %Y %H:%M:%S GMT'))
        header += b'\r\n'
        header += b'Upgrade: websocket\r\n'
        header += b'Connection: Upgrade\r\n'
        header += b'Sec-WebSocket-Accept: ' + common.to_bytes(common.random_base64_str(64)) + b'\r\n'
        header += b'\r\n'
        self.has_sent_header = True
        return header + buf

    def get_host_from_http_header(self, buf):
        """Return the value of the first 'Host: ' line in *buf* as str, or
        None (implicitly) when no Host header is present."""
        ret_buf = b''
        lines = buf.split(b'\r\n')
        if lines and len(lines) > 1:
            for line in lines:
                if match_begin(line, b"Host: "):
                    return common.to_str(line[6:])

    def not_match_return(self, buf):
        # Give up de-obfuscation for this connection.  In strict mode
        # ('simple_obfs_http') answer with 2 KiB of junk; in compatible mode
        # hand the raw buffer back so the plain protocol can take over.
        self.has_sent_header = True
        self.has_recv_header = True
        if self.method == 'simple_obfs_http':
            return (b'E'*2048, False, False)
        return (buf, True, False)

    def server_decode(self, buf):
        """Strip the client's fake websocket-upgrade HTTP request.

        Returns (data, need_decrypt, need_send_back[, host]); the 4-tuple
        form (with the Host header value appended) is used once the HTTP
        header has been fully received and validated.
        """
        if self.has_recv_header:
            return (buf, True, False)

        # accumulate until the whole request header is available
        self.recv_buffer += buf
        buf = self.recv_buffer
        if len(buf) > 4:
            if match_begin(buf, b'GET /') or match_begin(buf, b'POST /'):
                if len(buf) > 65536:
                    # header unreasonably large — refuse to keep buffering
                    self.recv_buffer = None
                    logging.warn('simple_obfs_http: over size')
                    return self.not_match_return(buf)
            else: #not http header, run on original protocol
                self.recv_buffer = None
                logging.debug('simple_obfs_http: not match begin')
                return self.not_match_return(buf)
        else:
            return (b'', True, False)

        if b'\r\n\r\n' in buf:
            if b'Upgrade: websocket' not in buf:
                self.recv_buffer = None
                logging.debug('simple_obfs_http: protocol error')
                return self.not_match_return(buf)
            datas = buf.split(b'\r\n\r\n', 1)
            host = self.get_host_from_http_header(buf)
            if host and self.server_info.obfs_param:
                # obfs_param is a comma-separated whitelist of Host values
                # (port suffix stripped before the comparison)
                pos = host.find(":")
                if pos >= 0:
                    host = host[:pos]
                hosts = self.server_info.obfs_param.split(b',')
                if common.to_bytes(host) not in hosts:
                    return self.not_match_return(buf)
            if len(datas) > 1:
                self.has_recv_header = True
                return (datas[1], True, False, host)
            return self.not_match_return(buf)
        else:
            return (b'', True, False)
#!/usr/bin/env python
#
# Copyright 2015-2015 breakwa11
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import absolute_import, division, print_function, \
    with_statement

import os
import sys
import hashlib
import logging
import binascii
import struct
import base64
import time
import random
import hmac
import hashlib
import string

from shadowsocks import common
from shadowsocks.obfsplugin import plain
# NOTE: `ord` here is shadowsocks.common.ord, which shadows the builtin so
# that single-byte indexing works the same on Python 2 (str) and 3 (bytes).
from shadowsocks.common import to_bytes, to_str, ord
from shadowsocks import lru_cache

def create_simple_obfs_tls(method):
    # Factory used by the obfs plugin loader.
    return simple_obfs_tls(method)

# plugin registry: obfs name -> (factory,)
obfs_map = {
    'simple_obfs_tls': (create_simple_obfs_tls,),
    'simple_obfs_tls_compatible': (create_simple_obfs_tls,),
}

def match_begin(str1, str2):
    # True when str1 starts with str2 (bytes-safe startswith).
    if len(str1) >= len(str2):
        if str1[:len(str2)] == str2:
            return True
    return False

class obfs_auth_data(object):
    # Per-server state shared by every connection of one listener.
    def __init__(self):
        self.client_data = lru_cache.LRUCache(60 * 5)   # replay cache, 5 min TTL
        self.client_id = os.urandom(32)
        # recorded 30 min in the past so slightly-old clients still pass
        self.startup_time = int(time.time() - 60 * 30) & 0xFFFFFFFF

class simple_obfs_tls(plain.plain):
    """Obfuscates traffic as a TLS 1.2 handshake + application-data records
    (server side implemented; client side is unfinished)."""

    def __init__(self, method):
        self.method = method
        self.obfs_stage = 0       # encode state: 0 handshake, 1 appdata, -1 pass-through
        self.deobfs_stage = 0     # decode state: 0 handshake, 1 appdata, -1 pass-through
        self.send_buffer = b''
        self.recv_buffer = b''
        self.client_id = b''
        self.max_time_dif = 60 * 60 * 24 # time dif (second) setting
        self.tls_version = b'\x03\x03'   # TLS 1.2 record version
        self.overhead = 5                # per-record header size

    def init_data(self):
        return obfs_auth_data()

    def get_overhead(self, direction): # direction: true for c->s false for s->c
        return self.overhead

    def sni(self, url):
        """Build a TLS server_name (SNI) extension for hostname *url*."""
        url = common.to_bytes(url)
        data = b"\x00" + struct.pack('>H', len(url)) + url
        data = b"\x00\x00" + struct.pack('>H', len(data) + 2) + struct.pack('>H', len(data)) + data
        return data

    def pack_auth_data(self, client_id):
        """32-byte client-random: 4-byte UTC time + 18 random bytes +
        10-byte HMAC-SHA1(key+client_id) tag over the first 22 bytes."""
        utc_time = int(time.time()) & 0xFFFFFFFF
        data = struct.pack('>I', utc_time) + os.urandom(18)
        data += hmac.new(self.server_info.key + client_id, data, hashlib.sha1).digest()[:10]
        return data

    def client_encode(self, buf):
        # Client side is unfinished: the raise makes the rest dead code.
        raise Exception('Need to finish')
        if self.obfs_stage == -1:
            return buf
        if self.obfs_stage == 1:
            self.obfs_stage += 1
            ret = b"\x17" + self.tls_version + struct.pack('>H', len(buf)) + buf
            return ret
        if self.obfs_stage == 0:
            self.obfs_stage += 1
            # forge a ClientHello: version + authed random + session id +
            # fixed cipher-suite list
            data = self.tls_version + self.pack_auth_data(self.server_info.data.client_id) + b"\x20" + self.server_info.data.client_id + binascii.unhexlify(b"001cc02bc02fcca9cca8cc14cc13c00ac014c009c013009c0035002f000a" + b"0100")
            ext = binascii.unhexlify(b"ff01000100")
            host = self.server_info.obfs_param or self.server_info.host
            if host and host[-1] in string.digits:
                # bare IP address: send an empty SNI instead
                host = ''
            hosts = host.split(',')
            host = random.choice(hosts)
            ext += self.sni(host)
            ext += b"\x00\x17\x00\x00"
            ext += b"\x00\x23\x00\xd0" + os.urandom(208) # ticket
            ext += binascii.unhexlify(b"000d001600140601060305010503040104030301030302010203")
            ext += binascii.unhexlify(b"000500050100000000")
            ext += binascii.unhexlify(b"00120000")
            ext += binascii.unhexlify(b"75500000")
            ext += binascii.unhexlify(b"000b00020100")
            ext += binascii.unhexlify(b"000a0006000400170018")
            data += struct.pack('>H', len(ext)) + ext
            data = b"\x01\x00" + struct.pack('>H', len(data)) + data
            data = b"\x16\x03\x01" + struct.pack('>H', len(data)) + data
            return data
        return buf

    def client_decode(self, buf):
        # Client side is unfinished: the raise makes the rest dead code.
        raise Exception('Need to finish')
        if self.deobfs_stage == -1:
            return (buf, False)

        if self.deobfs_stage == 8:
            # application-data stage: unwrap 0x17 records from recv_buffer
            ret = b''
            self.recv_buffer += buf
            while len(self.recv_buffer) > 5:
                if ord(self.recv_buffer[0]) != 0x17:
                    logging.info("data = %s" % (binascii.hexlify(self.recv_buffer)))
                    raise Exception('server_decode appdata error')
                size = struct.unpack('>H', self.recv_buffer[3:5])[0]
                if len(self.recv_buffer) < size + 5:
                    break
                buf = self.recv_buffer[5:size+5]
                ret += buf
                self.recv_buffer = self.recv_buffer[size+5:]
            return (ret, False)

        # handshake stage: verify the two HMAC tags of the ServerHello
        if len(buf) < 11 + 32 + 1 + 32:
            raise Exception('client_decode data error')
        verify = buf[11:33]
        if hmac.new(self.server_info.key + self.server_info.data.client_id, verify, hashlib.sha1).digest()[:10] != buf[33:43]:
            raise Exception('client_decode data error')
        if hmac.new(self.server_info.key + self.server_info.data.client_id, buf[:-10], hashlib.sha1).digest()[:10] != buf[-10:]:
            raise Exception('client_decode data error')
        return (b'', True)

    def server_encode(self, buf):
        """Wrap outgoing server data in TLS records; the first call also
        emits a forged ServerHello + ChangeCipherSpec."""
        if self.obfs_stage == -1:
            return buf
        if self.obfs_stage == 1:
            # steady state: split into randomly sized 0x17 appdata records
            ret = b''
            while len(buf) > 2048:
                size = min(struct.unpack('>H', os.urandom(2))[0] % 4096 + 100, len(buf))
                ret += b"\x17" + self.tls_version + struct.pack('>H', size) + buf[:size]
                buf = buf[size:]
            if len(buf) > 0:
                ret += b"\x17" + self.tls_version + struct.pack('>H', len(buf)) + buf
            return ret

        utc_time = int(time.time()) & 0xFFFFFFFF

        # echo the client's session id when we have one
        if len(self.client_id) < 32:
            session_id = os.urandom(32)
        else:
            session_id = self.client_id[:32]

        data = struct.pack('>I', utc_time) + os.urandom(28) + b"\x20" + session_id + b"\xcc\xa8\x00\x00\x00\xff\x01\x00\x01\x00\x00\x17\x00\x00\x00\x0b\x00\x02\x01\x00" #random_unix_time, ramdom_byte, session_id_len, session_id, the reset
        data = b"\x02\x00" + struct.pack('>H', 87) + b"\x03\x03" + data #handshake_type, handshake_len_1, handshake_len_2, handshake_version
        data = b"\x16\x03\x01" + struct.pack('>H', 91) + data #content_type, version, len

        data += b"\x14" + self.tls_version + b"\x00\x01\x01" #ChangeCipherSpec

        # first payload chunk rides inside a 0x16 handshake record
        size = min(struct.unpack('>H', os.urandom(2))[0] % 4096 + 100, len(buf))

        data += b"\x16" + self.tls_version + struct.pack('>H', size) + buf[:size]

        if len(buf) - size > 0:
            buf = buf[size:]
            while len(buf) > 2048:
                size = min(struct.unpack('>H', os.urandom(2))[0] % 4096 + 100, len(buf))
                data += b"\x17" + self.tls_version + struct.pack('>H', size) + buf[:size]
                buf = buf[size:]
            if len(buf) > 0:
                data += b"\x17" + self.tls_version + struct.pack('>H', len(buf)) + buf

        self.obfs_stage += 1

        return data

    def decode_error_return(self, buf):
        # Fall out of TLS mode permanently.  Strict mode answers junk;
        # compatible mode hands the raw buffer to the plain protocol.
        self.deobfs_stage = -1
        self.obfs_stage = -1
        self.overhead = 0
        if self.method == 'simple_obfs_tls':
            return (b'E'*2048, False, False)
        return (buf, True, False)

    def server_decode(self, buf):
        """Parse the forged ClientHello (stage 0), then unwrap 0x17 appdata
        records (stage 1).  Returns (data, need_decrypt, need_send_back)
        with the SNI host appended on handshake completion."""
        if self.deobfs_stage == -1:
            return (buf, True, False)

        if self.deobfs_stage == 1:
            ret = b''
            self.recv_buffer += buf
            while len(self.recv_buffer) > 5:
                if ord(self.recv_buffer[0]) != 0x17 or ord(self.recv_buffer[1]) != 0x3 or ord(self.recv_buffer[2]) != 0x3:
                    logging.info("data = %s" % (binascii.hexlify(self.recv_buffer)))
                    raise Exception('server_decode appdata error')
                size = struct.unpack('>H', self.recv_buffer[3:5])[0]
                if len(self.recv_buffer) < size + 5:
                    break
                ret += self.recv_buffer[5:size+5]
                self.recv_buffer = self.recv_buffer[size+5:]
            return (ret, True, False)

        #raise Exception("handshake data = %s" % (binascii.hexlify(buf)))

        self.recv_buffer += buf
        buf = self.recv_buffer
        ogn_buf = buf
        if len(buf) < 5:
            return (b'', False, False)
        if not match_begin(buf, b'\x16\x03\x01'):
            return self.decode_error_return(ogn_buf)
        buf = buf[3:]
        header_len = struct.unpack('>H', buf[:2])[0]
        if header_len > len(buf) - 2:
            # record not complete yet, wait for more data
            return (b'', False, False)

        self.recv_buffer = self.recv_buffer[header_len + 5:]
        self.deobfs_stage = 1
        buf = buf[2:header_len + 2]
        if not match_begin(buf, b'\x01\x00'): #client hello
            logging.info("tls_auth not client hello message")
            return self.decode_error_return(ogn_buf)
        buf = buf[2:]
        if struct.unpack('>H', buf[:2])[0] != len(buf) - 2:
            logging.info("tls_auth wrong message size")
            return self.decode_error_return(ogn_buf)
        buf = buf[2:]
        if not match_begin(buf, self.tls_version):
            logging.info("tls_auth wrong tls version")
            return self.decode_error_return(ogn_buf)
        buf = buf[2:]
        verifyid = buf[:32]
        buf = buf[32:]
        sessionid_len = ord(buf[0])
        if sessionid_len < 32:
            logging.info("tls_auth wrong sessionid_len")
            return self.decode_error_return(ogn_buf)
        sessionid = buf[1:sessionid_len + 1]
        buf = buf[sessionid_len+1:]
        self.client_id = sessionid
        # check the authed timestamp in the client-random against our clock
        utc_time = struct.unpack('>I', verifyid[:4])[0]
        time_dif = common.int32((int(time.time()) & 0xffffffff) - utc_time)
        if self.server_info.obfs_param:
            try:
                self.max_time_dif = int(self.server_info.obfs_param)
            except:
                pass
        if self.max_time_dif > 0 and (time_dif < -self.max_time_dif or time_dif > self.max_time_dif \
                or common.int32(utc_time - self.server_info.data.startup_time) < -self.max_time_dif / 2):
            logging.info("tls_auth wrong time")
            return self.decode_error_return(ogn_buf)
        # replay protection: first 22 bytes of the client-random must be new
        if self.server_info.data.client_data.get(verifyid[:22]):
            logging.info("replay attack detect, id = %s" % (binascii.hexlify(verifyid)))
            return self.decode_error_return(ogn_buf)
        self.server_info.data.client_data.sweep()
        self.server_info.data.client_data[verifyid[:22]] = sessionid
        # (buffer_to_recv, is_need_decrypt, is_need_to_encode_and_send_back)

        # skip cipher-suite list etc. (fixed 62 bytes) to the extensions;
        # the first chunk of real payload hides in the session-ticket ext
        buf = buf[62:]
        if not match_begin(buf, b'\x00\x23'):
            logging.info("ext header error")
            return self.decode_error_return(ogn_buf)

        buf = buf[2:]
        ext_length = struct.unpack('>H', buf[:2])[0]
        buf = buf[2:]
        ret = buf[:ext_length]
        if len(self.recv_buffer) > 0:
            # leftover bytes are already appdata records; recurse to unwrap
            ret += self.server_decode(b'')[0]
        buf = buf[ext_length:]

        # extract the SNI host name for upstream routing
        host_name = b''
        buf = buf[7:]
        host_name_len = struct.unpack('>H', buf[:2])[0]
        buf = buf[2:]
        hostname = buf[:host_name_len]

        host_name = common.to_str(hostname)

        return (ret, True, False, host_name)
#!/usr/bin/env python
#
# Copyright 2015-2015 breakwa11
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import absolute_import, division, print_function, \
    with_statement

import os
import sys
import hashlib
import logging
import binascii
import base64
import time
import datetime
import random
import struct
import zlib
import hmac
import hashlib

import shadowsocks
from shadowsocks import common
from shadowsocks.obfsplugin import plain
# `ord`/`chr` are the py2/py3-safe versions from shadowsocks.common
from shadowsocks.common import to_bytes, to_str, ord, chr

def create_verify_obfs(method):
    return verify_simple(method)

def create_verify_deflate(method):
    return verify_deflate(method)

def create_verify_sha1(method):
    return verify_sha1(method)

def create_auth_obfs(method):
    # NOTE(review): `auth_simple` is not defined in this module; calling this
    # factory would raise NameError.  It is also absent from obfs_map below.
    return auth_simple(method)

# plugin registry: protocol name -> (factory,)
obfs_map = {
    'verify_simple': (create_verify_obfs,),
    'verify_deflate': (create_verify_deflate,),
    'verify_sha1': (create_verify_sha1,),
    'verify_sha1_compatible': (create_verify_sha1,),
}

def match_begin(str1, str2):
    # True when str1 starts with str2 (bytes-safe startswith).
    if len(str1) >= len(str2):
        if str1[:len(str2)] == str2:
            return True
    return False

class obfs_verify_data(object):
    # No shared per-server state is needed by these protocols.
    def __init__(self):
        pass

class verify_base(plain.plain):
    """Common base: the verify_* protocols only transform the pre/post
    encrypt hooks, so the encode/decode hooks are identity functions."""

    def __init__(self, method):
        super(verify_base, self).__init__(method)
        self.method = method

    def init_data(self):
        return obfs_verify_data()

    def set_server_info(self, server_info):
        self.server_info = server_info

    def client_encode(self, buf):
        return buf

    def client_decode(self, buf):
        return (buf, False)

    def server_encode(self, buf):
        return buf

    def server_decode(self, buf):
        return (buf, True, False)

class verify_simple(verify_base):
    """Chunked framing with random padding and a CRC32 trailer per chunk:
    [2-byte big-endian total length][1-byte pad length][pad][payload][CRC32]."""

    def __init__(self, method):
        super(verify_simple, self).__init__(method)
        self.recv_buf = b''
        self.unit_len = 8100            # max payload bytes per chunk
        self.decrypt_packet_num = 0
        self.raw_trans = False          # True once framing is abandoned

    def pack_data(self, buf):
        if len(buf) == 0:
            return b''
        # 0-15 random padding bytes, length recorded in the first byte
        rnd_data = os.urandom(common.ord(os.urandom(1)[0]) % 16)
        data = common.chr(len(rnd_data) + 1) + rnd_data + buf
        data = struct.pack('>H', len(data) + 6) + data
        # CRC is stored complemented so crc32(chunk) == 0xffffffff on verify
        crc = (0xffffffff - binascii.crc32(data)) & 0xffffffff
        data += struct.pack('<I', crc)
        return data

    def client_pre_encrypt(self, buf):
        ret = b''
        while len(buf) > self.unit_len:
            ret += self.pack_data(buf[:self.unit_len])
            buf = buf[self.unit_len:]
        ret += self.pack_data(buf)
        return ret

    def client_post_decrypt(self, buf):
        if self.raw_trans:
            return buf
        self.recv_buf += buf
        out_buf = b''
        while len(self.recv_buf) > 2:
            length = struct.unpack('>H', self.recv_buf[:2])[0]
            if length >= 8192 or length < 7:
                self.raw_trans = True
                self.recv_buf = b''
                raise Exception('client_post_decrypt data error')
            if length > len(self.recv_buf):
                break

            # complemented-CRC check: crc32 of the full chunk must be all-ones
            if (binascii.crc32(self.recv_buf[:length]) & 0xffffffff) != 0xffffffff:
                self.raw_trans = True
                self.recv_buf = b''
                raise Exception('client_post_decrypt data uncorrect CRC32')

            # skip [len][padlen][pad], drop the 4-byte CRC trailer
            pos = common.ord(self.recv_buf[2]) + 2
            out_buf += self.recv_buf[pos:length - 4]
            self.recv_buf = self.recv_buf[length:]

        if out_buf:
            self.decrypt_packet_num += 1
        return out_buf

    def server_pre_encrypt(self, buf):
        ret = b''
        while len(buf) > self.unit_len:
            ret += self.pack_data(buf[:self.unit_len])
            buf = buf[self.unit_len:]
        ret += self.pack_data(buf)
        return ret

    def server_post_decrypt(self, buf):
        if self.raw_trans:
            return (buf, False)
        self.recv_buf += buf
        out_buf = b''
        while len(self.recv_buf) > 2:
            length = struct.unpack('>H', self.recv_buf[:2])[0]
            if length >= 8192 or length < 7:
                self.raw_trans = True
                self.recv_buf = b''
                # on the very first packet answer junk instead of raising,
                # so active probes cannot distinguish the protocol
                if self.decrypt_packet_num == 0:
                    return (b'E'*2048, False)
                else:
                    raise Exception('server_post_decrype data error')
            if length > len(self.recv_buf):
                break

            if (binascii.crc32(self.recv_buf[:length]) & 0xffffffff) != 0xffffffff:
                self.raw_trans = True
                self.recv_buf = b''
                if self.decrypt_packet_num == 0:
                    return (b'E'*2048, False)
                else:
                    raise Exception('server_post_decrype data uncorrect CRC32')

            pos = common.ord(self.recv_buf[2]) + 2
            out_buf += self.recv_buf[pos:length - 4]
            self.recv_buf = self.recv_buf[length:]

        if out_buf:
            self.decrypt_packet_num += 1
        return (out_buf, False)

class verify_deflate(verify_base):
    """Chunked framing where each chunk is a raw-deflate stream prefixed by
    its 2-byte compressed length (zlib header bytes stripped/re-added)."""

    def __init__(self, method):
        super(verify_deflate, self).__init__(method)
        self.recv_buf = b''
        self.unit_len = 32700
        self.decrypt_packet_num = 0
        self.raw_trans = False

    def pack_data(self, buf):
        if len(buf) == 0:
            return b''
        data = zlib.compress(buf)
        # drop the 2-byte zlib header; the receiver re-adds b'\x78\x9c'
        data = struct.pack('>H', len(data)) + data[2:]
        return data

    def client_pre_encrypt(self, buf):
        ret = b''
        while len(buf) > self.unit_len:
            ret += self.pack_data(buf[:self.unit_len])
            buf = buf[self.unit_len:]
        ret += self.pack_data(buf)
        return ret

    def client_post_decrypt(self, buf):
        if self.raw_trans:
            return buf
        self.recv_buf += buf
        out_buf = b''
        while len(self.recv_buf) > 2:
            length = struct.unpack('>H', self.recv_buf[:2])[0]
            if length >= 32768 or length < 6:
                self.raw_trans = True
                self.recv_buf = b''
                raise Exception('client_post_decrypt data error')
            if length > len(self.recv_buf):
                break

            out_buf += zlib.decompress(b'x\x9c' + self.recv_buf[2:length])
            self.recv_buf = self.recv_buf[length:]

        if out_buf:
            self.decrypt_packet_num += 1
        return out_buf

    def server_pre_encrypt(self, buf):
        ret = b''
        while len(buf) > self.unit_len:
            ret += self.pack_data(buf[:self.unit_len])
            buf = buf[self.unit_len:]
        ret += self.pack_data(buf)
        return ret

    def server_post_decrypt(self, buf):
        if self.raw_trans:
            return (buf, False)
        self.recv_buf += buf
        out_buf = b''
        while len(self.recv_buf) > 2:
            length = struct.unpack('>H', self.recv_buf[:2])[0]
            if length >= 32768 or length < 6:
                self.raw_trans = True
                self.recv_buf = b''
                if self.decrypt_packet_num == 0:
                    return (b'E'*2048, False)
                else:
                    raise Exception('server_post_decrype data error')
            if length > len(self.recv_buf):
                break

            out_buf += zlib.decompress(b'\x78\x9c' + self.recv_buf[2:length])
            self.recv_buf = self.recv_buf[length:]

        if out_buf:
            self.decrypt_packet_num += 1
        return (out_buf, False)

class verify_sha1(verify_base):
    """OTA-style framing: an HMAC-SHA1-authenticated header (keyed with
    iv+key) followed by chunks of [2-byte len][10-byte HMAC tag][payload],
    each chunk keyed with iv+sequence-number."""

    def __init__(self, method):
        super(verify_sha1, self).__init__(method)
        self.recv_buf = b''
        self.unit_len = 8100
        self.raw_trans = False
        self.pack_id = 0        # outgoing chunk sequence number
        self.recv_id = 0        # expected incoming chunk sequence number
        self.has_sent_header = False
        self.has_recv_header = False

    def pack_data(self, buf):
        if len(buf) == 0:
            return b''
        sha1data = hmac.new(self.server_info.iv + struct.pack('>I', self.pack_id), buf, hashlib.sha1).digest()
        data = struct.pack('>H', len(buf)) + sha1data[:10] + buf
        self.pack_id += 1
        return data

    def pack_auth_data(self, buf):
        # set the OTA flag bit (0x10) on the address-type byte, then append
        # a 10-byte HMAC-SHA1(iv+key) tag over the flagged header
        data = chr(ord(buf[0]) | 0x10) + buf[1:]
        data += hmac.new(self.server_info.iv + self.server_info.key, data, hashlib.sha1).digest()[:10]
        return data

    def client_pre_encrypt(self, buf):
        ret = b''
        if not self.has_sent_header:
            datalen = self.get_head_size(buf, 30)
            ret += self.pack_auth_data(buf[:datalen])
            buf = buf[datalen:]
            self.has_sent_header = True
        while len(buf) > self.unit_len:
            ret += self.pack_data(buf[:self.unit_len])
            buf = buf[self.unit_len:]
        ret += self.pack_data(buf)
        return ret

    def client_post_decrypt(self, buf):
        # downstream (server->client) data is not authenticated
        return buf

    def server_pre_encrypt(self, buf):
        return buf

    def not_match_return(self, buf):
        # strict mode answers junk; compatible mode falls back to plain
        self.raw_trans = True
        if self.method == 'verify_sha1':
            return (b'E'*2048, False)
        return (buf, False)

    def server_post_decrypt(self, buf):
        if self.raw_trans:
            return (buf, False)
        self.recv_buf += buf
        out_buf = b''
        if not self.has_recv_header:
            if len(self.recv_buf) < 2:
                return (b'', False)
            if (ord(self.recv_buf[0]) & 0x10) != 0x10:
                return self.not_match_return(self.recv_buf)
            head_size = self.get_head_size(self.recv_buf, 65536)
            if len(self.recv_buf) < head_size + 10:
                return self.not_match_return(self.recv_buf)
            sha1data = hmac.new(self.server_info.recv_iv + self.server_info.key, self.recv_buf[:head_size], hashlib.sha1).digest()[:10]
            if sha1data != self.recv_buf[head_size:head_size + 10]:
                logging.error('server_post_decrype data uncorrect auth HMAC-SHA1')
                return self.not_match_return(self.recv_buf)
            # clear the OTA flag bit before handing the header upstream
            out_buf = to_bytes(chr(ord(self.recv_buf[0]) & 0xEF)) + self.recv_buf[1:head_size]
            self.recv_buf = self.recv_buf[head_size + 10:]
            self.has_recv_header = True
        while len(self.recv_buf) > 2:
            length = struct.unpack('>H', self.recv_buf[:2])[0] + 12
            if length > len(self.recv_buf):
                break

            data = self.recv_buf[12:length]
            sha1data = hmac.new(self.server_info.recv_iv + struct.pack('>I', self.recv_id), data, hashlib.sha1).digest()[:10]
            if sha1data != self.recv_buf[2:12]:
                raise Exception('server_post_decrype data uncorrect chunk HMAC-SHA1')

            self.recv_id = (self.recv_id + 1) & 0xFFFFFFFF
            out_buf += data
            self.recv_buf = self.recv_buf[length:]

        return (out_buf, False)

    def client_udp_pre_encrypt(self, buf):
        # NOTE(review): `ret` (the authenticated packet) is computed and then
        # discarded; the returned packet has the flag bit set but NO HMAC tag,
        # so server_udp_post_decrypt below would reject it — looks like a bug,
        # verify against upstream before changing.
        ret = self.pack_auth_data(buf)
        return chr(ord(buf[0]) | 0x10) + buf[1:]

    def server_udp_post_decrypt(self, buf):
        if buf and ((ord(buf[0]) & 0x10) == 0x10):
            if len(buf) <= 11:
                return (b'', None)
            sha1data = hmac.new(self.server_info.recv_iv + self.server_info.key, buf[:-10], hashlib.sha1).digest()[:10]
            if sha1data != buf[-10:]:
                return (b'', None)
            return (to_bytes(chr(ord(buf[0]) & 0xEF)) + buf[1:-10], None)
        else:
            return (buf, None)
buf[-10:]: + return (b'', None) + return (to_bytes(chr(ord(buf[0]) & 0xEF)) + buf[1:-10], None) + else: + return (buf, None) + diff --git a/shadowsocks/ordereddict.py b/shadowsocks/ordereddict.py new file mode 100644 index 0000000..4b8e87e --- /dev/null +++ b/shadowsocks/ordereddict.py @@ -0,0 +1,218 @@ +import collections + +########################################################################## +# OrderedDict +########################################################################## + + +class OrderedDict(dict): + 'Dictionary that remembers insertion order' + # An inherited dict maps keys to values. + # The inherited dict provides __getitem__, __len__, __contains__, and get. + # The remaining methods are order-aware. + # Big-O running times for all methods are the same as regular dictionaries. + + # The internal self.__map dict maps keys to links in a doubly linked list. + # The circular doubly linked list starts and ends with a sentinel element. + # The sentinel element never gets deleted (this simplifies the algorithm). + # Each link is stored as a list of length three: [PREV, NEXT, KEY]. + + def __init__(*args, **kwds): + '''Initialize an ordered dictionary. The signature is the same as + regular dictionaries, but keyword arguments are not recommended because + their insertion order is arbitrary. + + ''' + if not args: + raise TypeError("descriptor '__init__' of 'OrderedDict' object " + "needs an argument") + self = args[0] + args = args[1:] + if len(args) > 1: + raise TypeError('expected at most 1 arguments, got %d' % len(args)) + try: + self.__root + except AttributeError: + self.__root = root = [] # sentinel node + root[:] = [root, root, None] + self.__map = {} + self.__update(*args, **kwds) + + def __setitem__(self, key, value, dict_setitem=dict.__setitem__): + 'od.__setitem__(i, y) <==> od[i]=y' + # Setting a new item creates a new link at the end of the linked list, + # and the inherited dictionary is updated with the new key/value pair. 
+ if key not in self: + root = self.__root + last = root[0] + last[1] = root[0] = self.__map[key] = [last, root, key] + return dict_setitem(self, key, value) + + def __delitem__(self, key, dict_delitem=dict.__delitem__): + 'od.__delitem__(y) <==> del od[y]' + # Deleting an existing item uses self.__map to find the link which gets + # removed by updating the links in the predecessor and successor nodes. + dict_delitem(self, key) + link_prev, link_next, _ = self.__map.pop(key) + # update link_prev[NEXT] + link_prev[1] = link_next + # update link_next[PREV] + link_next[0] = link_prev + + def __iter__(self): + 'od.__iter__() <==> iter(od)' + # Traverse the linked list in order. + root = self.__root + # start at the first node + curr = root[1] + while curr is not root: + yield curr[2] # yield the curr[KEY] + curr = curr[1] # move to next node + + def __reversed__(self): + 'od.__reversed__() <==> reversed(od)' + # Traverse the linked list in reverse order. + root = self.__root + # start at the last node + curr = root[0] + while curr is not root: + yield curr[2] # yield the curr[KEY] + curr = curr[0] # move to previous node + + def clear(self): + 'od.clear() -> None. Remove all items from od.' 
+ root = self.__root + root[:] = [root, root, None] + self.__map.clear() + dict.clear(self) + + # -- the following methods do not depend on the internal structure -- + + def keys(self): + 'od.keys() -> list of keys in od' + return list(self) + + def values(self): + 'od.values() -> list of values in od' + return [self[key] for key in self] + + def items(self): + 'od.items() -> list of (key, value) pairs in od' + return [(key, self[key]) for key in self] + + def iterkeys(self): + 'od.iterkeys() -> an iterator over the keys in od' + return iter(self) + + def itervalues(self): + 'od.itervalues -> an iterator over the values in od' + for k in self: + yield self[k] + + def iteritems(self): + 'od.iteritems -> an iterator over the (key, value) pairs in od' + for k in self: + yield (k, self[k]) + + update = collections.MutableMapping.update + + __update = update # let subclasses override update without breaking __init__ + + __marker = object() + + def pop(self, key, default=__marker): + '''od.pop(k[,d]) -> v, remove specified key and return the corresponding + value. If key is not found, d is returned if given, otherwise KeyError + is raised. + + ''' + if key in self: + result = self[key] + del self[key] + return result + if default is self.__marker: + raise KeyError(key) + return default + + def setdefault(self, key, default=None): + 'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od' + if key in self: + return self[key] + self[key] = default + return default + + def popitem(self, last=True): + '''od.popitem() -> (k, v), return and remove a (key, value) pair. + Pairs are returned in LIFO order if last is true or FIFO order if false. + + ''' + if not self: + raise KeyError('dictionary is empty') + key = next(reversed(self) if last else iter(self)) + value = self.pop(key) + return key, value + + def __repr__(self, _repr_running={}): + 'od.__repr__() <==> repr(od)' + call_key = id(self), _get_ident() + if call_key in _repr_running: + return '...' 
+ _repr_running[call_key] = 1 + try: + if not self: + return '%s()' % (self.__class__.__name__,) + return '%s(%r)' % (self.__class__.__name__, self.items()) + finally: + del _repr_running[call_key] + + def __reduce__(self): + 'Return state information for pickling' + items = [[k, self[k]] for k in self] + inst_dict = vars(self).copy() + for k in vars(OrderedDict()): + inst_dict.pop(k, None) + if inst_dict: + return (self.__class__, (items,), inst_dict) + return self.__class__, (items,) + + def copy(self): + 'od.copy() -> a shallow copy of od' + return self.__class__(self) + + @classmethod + def fromkeys(cls, iterable, value=None): + '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S. + If not specified, the value defaults to None. + + ''' + self = cls() + for key in iterable: + self[key] = value + return self + + def __eq__(self, other): + '''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive + while comparison to a regular mapping is order-insensitive. + + ''' + if isinstance(other, OrderedDict): + return dict.__eq__(self, other) and all(_imap(_eq, self, other)) + return dict.__eq__(self, other) + + def __ne__(self, other): + 'od.__ne__(y) <==> od!=y' + return not self == other + + # -- the following methods support python 3.x style dictionary views -- + + def viewkeys(self): + "od.viewkeys() -> a set-like object providing a view on od's keys" + return KeysView(self) + + def viewvalues(self): + "od.viewvalues() -> an object providing a view on od's values" + return ValuesView(self) + + def viewitems(self): + "od.viewitems() -> a set-like object providing a view on od's items" + return ItemsView(self) diff --git a/shadowsocks/run.sh b/shadowsocks/run.sh new file mode 100644 index 0000000..7a91779 --- /dev/null +++ b/shadowsocks/run.sh @@ -0,0 +1,6 @@ +#!/bin/bash +cd `dirname $0` +eval $(ps -ef | grep "[0-9] python server\\.py a" | awk '{print "kill "$2}') +ulimit -n 4096 +nohup python server.py a >> /dev/null 2>&1 & + diff --git 
def main():
    """Entry point of the ssserver process.

    Loads the server config, expands single-port configs into the
    port_password map, then either enters manager mode or starts one
    TCPRelay/UDPRelay pair per configured port (IPv6 and IPv4), finally
    running the event loop — optionally forked across several workers.
    """
    shell.check_python()

    config = shell.get_config(False)

    shell.log_shadowsocks_version()

    daemon.daemon_exec(config)

    if config['port_password']:
        pass
    else:
        # Single-port config: synthesize a one-entry port_password map so
        # the per-port startup loop below covers both config styles.
        config['port_password'] = {}
        server_port = config['server_port']
        if isinstance(server_port, list):
            for a_server_port in server_port:
                config['port_password'][a_server_port] = config['password']
        else:
            config['port_password'][str(server_port)] = config['password']

    if not config.get('dns_ipv6', False):
        asyncdns.IPV6_CONNECTION_SUPPORT = False

    if config.get('manager_address', 0):
        # Manager mode replaces normal serving entirely.
        logging.info('entering manager mode')
        manager.run(config)
        return

    tcp_servers = []
    udp_servers = []
    dns_resolver = asyncdns.DNSResolver()
    # Shared stat counters cannot cross fork boundaries, so disable them
    # when running multiple workers.
    if int(config['workers']) > 1:
        stat_counter_dict = None
    else:
        stat_counter_dict = {}
    port_password = config['port_password']
    config_password = config.get('password', 'm')
    del config['port_password']
    for port, password_obfs in port_password.items():
        # Per-port settings start from the global config and may be
        # overridden by the port entry (scalar, list, or dict form).
        method = config["method"]
        protocol = config.get("protocol", 'origin')
        protocol_param = config.get("protocol_param", '')
        obfs = config.get("obfs", 'plain')
        obfs_param = config.get("obfs_param", '')
        bind = config.get("out_bind", '')
        bindv6 = config.get("out_bindv6", '')
        if isinstance(password_obfs, list):
            # [password, obfs] or [password, obfs, protocol]
            password = password_obfs[0]
            obfs = password_obfs[1]
            if len(password_obfs) > 2:
                protocol = password_obfs[2]
        elif isinstance(password_obfs, dict):
            password = password_obfs.get('password', config_password)
            method = password_obfs.get('method', method)
            protocol = password_obfs.get('protocol', protocol)
            protocol_param = password_obfs.get(
                'protocol_param', protocol_param)
            obfs = password_obfs.get('obfs', obfs)
            obfs_param = password_obfs.get('obfs_param', obfs_param)
            bind = password_obfs.get('out_bind', bind)
            bindv6 = password_obfs.get('out_bindv6', bindv6)
        else:
            password = password_obfs
        a_config = config.copy()
        ipv6_ok = False
        logging.info(
            "server start with protocol[%s] password [%s] method [%s] obfs [%s] obfs_param [%s]" %
            (protocol, password, method, obfs, obfs_param))
        if 'server_ipv6' in a_config:
            try:
                # Accept "[addr]" bracket notation and strip the brackets.
                if len(a_config['server_ipv6']) > 2 and a_config['server_ipv6'][
                        0] == "[" and a_config['server_ipv6'][-1] == "]":
                    a_config['server_ipv6'] = a_config['server_ipv6'][1:-1]
                a_config['server_port'] = int(port)
                a_config['password'] = password
                a_config['method'] = method
                a_config['protocol'] = protocol
                a_config['protocol_param'] = protocol_param
                a_config['obfs'] = obfs
                a_config['obfs_param'] = obfs_param
                a_config['out_bind'] = bind
                a_config['out_bindv6'] = bindv6
                a_config['server'] = a_config['server_ipv6']
                logging.info("starting server at [%s]:%d" %
                             (a_config['server'], int(port)))
                tcp_servers.append(
                    tcprelay.TCPRelay(
                        a_config,
                        dns_resolver,
                        False,
                        stat_counter=stat_counter_dict))
                udp_servers.append(
                    udprelay.UDPRelay(
                        a_config,
                        dns_resolver,
                        False,
                        stat_counter=stat_counter_dict))
                # "::" already binds IPv4 too on dual-stack hosts, so a
                # later IPv4 bind failure is expected and ignorable.
                if a_config['server_ipv6'] == b"::":
                    ipv6_ok = True
            except Exception as e:
                shell.print_exception(e)

        try:
            # IPv4 (or hostname) listener for the same port.
            a_config = config.copy()
            a_config['server_port'] = int(port)
            a_config['password'] = password
            a_config['method'] = method
            a_config['protocol'] = protocol
            a_config['protocol_param'] = protocol_param
            a_config['obfs'] = obfs
            a_config['obfs_param'] = obfs_param
            a_config['out_bind'] = bind
            a_config['out_bindv6'] = bindv6
            logging.info("starting server at %s:%d" %
                         (a_config['server'], int(port)))
            tcp_servers.append(
                tcprelay.TCPRelay(
                    a_config,
                    dns_resolver,
                    False,
                    stat_counter=stat_counter_dict))
            udp_servers.append(
                udprelay.UDPRelay(
                    a_config,
                    dns_resolver,
                    False,
                    stat_counter=stat_counter_dict))
        except Exception as e:
            if not ipv6_ok:
                shell.print_exception(e)

    def run_server():
        # Runs the event loop in the current process (worker or single).
        def child_handler(signum, _):
            logging.warn('received SIGQUIT, doing graceful shutting down..')
            list(map(lambda s: s.close(next_tick=True),
                     tcp_servers + udp_servers))
        # SIGQUIT does not exist on Windows; fall back to SIGTERM there.
        signal.signal(getattr(signal, 'SIGQUIT', signal.SIGTERM),
                      child_handler)

        def int_handler(signum, _):
            sys.exit(1)
        signal.signal(signal.SIGINT, int_handler)

        try:
            loop = eventloop.EventLoop()
            dns_resolver.add_to_loop(loop)
            list(map(lambda s: s.add_to_loop(loop), tcp_servers + udp_servers))

            # Drop privileges only after sockets are bound.
            daemon.set_user(config.get('user', None))
            loop.run()
        except Exception as e:
            shell.print_exception(e)
            sys.exit(1)

    if int(config['workers']) > 1:
        if os.name == 'posix':
            children = []
            is_child = False
            for i in range(0, int(config['workers'])):
                r = os.fork()
                if r == 0:
                    logging.info('worker started')
                    is_child = True
                    run_server()
                    break
                else:
                    children.append(r)
            if not is_child:
                # Master process: forward signals to workers, close its own
                # copies of the listening sockets, then wait for children.
                def handler(signum, _):
                    for pid in children:
                        try:
                            os.kill(pid, signum)
                            os.waitpid(pid, 0)
                        except OSError:  # child may already exited
                            pass
                    sys.exit()
                signal.signal(signal.SIGTERM, handler)
                signal.signal(signal.SIGQUIT, handler)
                signal.signal(signal.SIGINT, handler)

                # master
                for a_tcp_server in tcp_servers:
                    a_tcp_server.close()
                for a_udp_server in udp_servers:
                    a_udp_server.close()
                dns_resolver.close()

                for child in children:
                    os.waitpid(child, 0)
        else:
            logging.warn('worker is only available on Unix/Linux')
            run_server()
    else:
        run_server()
def check_python():
    """Abort (exit code 1) unless running on Python 2.6+ or 3.3+."""
    major, minor = sys.version_info[0], sys.version_info[1]
    if major == 2 and minor < 6:
        print('Python 2.6+ required')
        sys.exit(1)
    if major == 3 and minor < 3:
        print('Python 3.3+ required')
        sys.exit(1)
    if major not in (2, 3):
        print('Python version not supported')
        sys.exit(1)
def check_config(config, is_local):
    """Validate and normalize *config*, exiting the process on fatal errors.

    Coerces port fields to int, warns about unsafe/odd settings, rejects
    missing or default passwords, and finally verifies the cipher can be
    constructed.  Returns None; calls sys.exit() on fatal problems.
    """
    if config.get('daemon', None) == 'stop':
        # no need to specify configuration for daemon stop
        return

    if is_local and not config.get('password', None):
        logging.error('password not specified')
        print_help(is_local)
        sys.exit(2)

    if not is_local and not config.get('password', None) \
            and not config.get('port_password', None):
        logging.error('password or port_password not specified')
        print_help(is_local)
        sys.exit(2)

    if 'local_port' in config:
        config['local_port'] = int(config['local_port'])

    if 'server_port' in config and not isinstance(config['server_port'], list):
        config['server_port'] = int(config['server_port'])

    # BUGFIX: get_config() normalizes local_address to str via to_str(),
    # so comparing only against bytes b'0.0.0.0' never matched on
    # Python 3 and silently disabled this warning.  Accept both forms.
    if config.get('local_address', '') in ['0.0.0.0', b'0.0.0.0']:
        logging.warn('warning: local set to listen on 0.0.0.0, it\'s not safe')
    if config.get('server', '') in ['127.0.0.1', 'localhost']:
        logging.warn('warning: server set to listen on %s:%s, are you sure?' %
                     (to_str(config['server']), config['server_port']))
    if config.get('timeout', 300) < 100:
        logging.warn('warning: your timeout %d seems too short' %
                     int(config.get('timeout')))
    if config.get('timeout', 300) > 600:
        logging.warn('warning: your timeout %d seems too long' %
                     int(config.get('timeout')))
    # password was already converted with to_bytes() by get_config(),
    # so the bytes comparison here is correct.
    if config.get('password') in [b'mypassword']:
        logging.error('DON\'T USE DEFAULT PASSWORD! Please change it in your '
                      'config.json!')
        sys.exit(1)
    if config.get('user', None) is not None:
        if os.name != 'posix':
            logging.error('user can be used only on Unix')
            sys.exit(1)

    # Fail fast if the configured cipher/password combination is invalid.
    encrypt.try_cipher(config['password'], config['method'])
config['local_address'] = to_str(value) + elif key == '-v': + v_count += 1 + # '-vv' turns on more verbose mode + config['verbose'] = v_count + elif key == '-t': + config['timeout'] = int(value) + elif key == '--fast-open': + config['fast_open'] = True + elif key == '--workers': + config['workers'] = int(value) + elif key == '--manager-address': + config['manager_address'] = value + elif key == '--user': + config['user'] = to_str(value) + elif key == '--forbidden-ip': + config['forbidden_ip'] = to_str(value) + elif key in ('-h', '--help'): + if is_local: + print_local_help() + else: + print_server_help() + sys.exit(0) + elif key == '--version': + print_shadowsocks() + sys.exit(0) + elif key == '-d': + config['daemon'] = to_str(value) + elif key == '--pid-file': + config['pid-file'] = to_str(value) + elif key == '--log-file': + config['log-file'] = to_str(value) + elif key == '-q': + v_count -= 1 + config['verbose'] = v_count + except getopt.GetoptError as e: + print(e, file=sys.stderr) + print_help(is_local) + sys.exit(2) + + if not config: + logging.error('config not specified') + print_help(is_local) + sys.exit(2) + + config['password'] = to_bytes(config.get('password', b'')) + config['method'] = to_str(config.get('method', 'aes-256-cfb')) + config['protocol'] = to_str(config.get('protocol', 'origin')) + config['protocol_param'] = to_str(config.get('protocol_param', '')) + config['obfs'] = to_str(config.get('obfs', 'plain')) + config['obfs_param'] = to_str(config.get('obfs_param', '')) + config['port_password'] = config.get('port_password', None) + config['timeout'] = int(config.get('timeout', 300)) + config['udp_timeout'] = int(config.get('udp_timeout', 120)) + config['udp_cache'] = int(config.get('udp_cache', 64)) + config['fast_open'] = config.get('fast_open', False) + config['workers'] = config.get('workers', 1) + config['friendly_detect'] = config.get('friendly_detect', 0) + config['pid-file'] = config.get('pid-file', '/var/run/shadowsocksr.pid') + 
config['log-file'] = config.get('log-file', '/var/log/shadowsocksr.log') + config['verbose'] = config.get('verbose', False) + config['redirect_verbose'] = config.get('redirect_verbose', True) + config['connect_verbose_info'] = config.get('connect_verbose_info', 0) + config['local_address'] = to_str(config.get('local_address', '127.0.0.1')) + config['local_port'] = config.get('local_port', 1080) + config['forbidden_ip'] = config.get('forbidden_ip', '') + config['forbidden_port'] = config.get('forbidden_port', '') + config['disconnect_ip'] = config.get('disconnect_ip', '') + + if is_local: + if config.get('server', None) is None: + logging.error('server addr not specified') + print_local_help() + sys.exit(2) + else: + config['server'] = to_str(config['server']) + else: + config['server'] = to_str(config.get('server', '0.0.0.0')) + try: + config['ignore_bind'] = \ + IPNetwork(config.get('ignore_bind', '127.0.0.0/8,::1/128,10.0.0.0/8,192.168.0.0/16')) + except Exception as e: + logging.error(e) + sys.exit(2) + config['server_port'] = config.get('server_port', 8388) + + logging.getLogger('').handlers = [] + logging.addLevelName(VERBOSE_LEVEL, 'VERBOSE') + if config['verbose'] >= 2: + level = VERBOSE_LEVEL + elif config['verbose'] == 1: + level = logging.DEBUG + elif config['verbose'] == -1: + level = logging.WARN + elif config['verbose'] <= -2: + level = logging.ERROR + else: + level = logging.INFO + verbose = config['verbose'] + logging.basicConfig( + level=level, + format='%(asctime)s %(levelname)-8s %(filename)s:%(lineno)s %(message)s', + datefmt='%Y-%m-%d %H:%M:%S') + + check_config(config, is_local) + + return config + + +def print_help(is_local): + if is_local: + print_local_help() + else: + print_server_help() + + +def print_local_help(): + print('''usage: sslocal [OPTION]... +A fast tunnel proxy that helps you bypass firewalls. + +You can supply configurations via either config file or command line arguments. 
+ +Proxy options: + -c CONFIG path to config file + -s SERVER_ADDR server address + -p SERVER_PORT server port, default: 8388 + -b LOCAL_ADDR local binding address, default: 127.0.0.1 + -l LOCAL_PORT local port, default: 1080 + -k PASSWORD password + -m METHOD encryption method, default: aes-256-cfb + -o OBFS obfsplugin, default: http_simple + -t TIMEOUT timeout in seconds, default: 300 + --fast-open use TCP_FASTOPEN, requires Linux 3.7+ + +General options: + -h, --help show this help message and exit + -d start/stop/restart daemon mode + --pid-file PID_FILE pid file for daemon mode + --log-file LOG_FILE log file for daemon mode + --user USER username to run as + -v, -vv verbose mode + -q, -qq quiet mode, only show warnings/errors + --version show version information + +Online help: +''') + + +def print_server_help(): + print('''usage: ssserver [OPTION]... +A fast tunnel proxy that helps you bypass firewalls. + +You can supply configurations via either config file or command line arguments. 
class JSFormat:
    """Character-at-a-time state machine that strips ``//`` line comments
    from JSON-ish config text (driven by remove_comment()).

    States:
      0 - plain text outside any string
      1 - inside a double-quoted string
      2 - just consumed a backslash inside a string
      3 - saw one '/', might be the start of a comment
      4 - inside a '//' comment, swallowing until newline
    """

    def __init__(self):
        # current parser state (see class docstring)
        self.state = 0

    def push(self, ch):
        """Feed one character; return the text to emit for it ('' to drop)."""
        ch = ord(ch)
        if self.state == 0:
            if ch == ord('"'):
                self.state = 1
                return to_str(chr(ch))
            elif ch == ord('/'):
                # Possible comment start: emit nothing yet; state 3 decides.
                self.state = 3
            else:
                return to_str(chr(ch))
        elif self.state == 1:
            if ch == ord('"'):
                self.state = 0
                return to_str(chr(ch))
            elif ch == ord('\\'):
                self.state = 2
            # Falls through for both the escape backslash and ordinary
            # string characters: either way the character is emitted.
            return to_str(chr(ch))
        elif self.state == 2:
            self.state = 1
            if ch == ord('"'):
                return to_str(chr(ch))
            # NOTE(review): state 1 already emitted the backslash before
            # entering state 2, so prefixing "\\" here looks like it can
            # duplicate backslashes for non-quote escapes — confirm against
            # configs that use escapes other than \" before changing.
            return "\\" + to_str(chr(ch))
        elif self.state == 3:
            if ch == ord('/'):
                # Second '/': this really is a comment.
                self.state = 4
            else:
                # False alarm: re-emit the withheld '/' plus this char.
                return "/" + to_str(chr(ch))
        elif self.state == 4:
            if ch == ord('\n'):
                # Keep the newline so line numbers stay stable.
                self.state = 0
                return "\n"
        return ""
+ +from __future__ import absolute_import, division, print_function, \ + with_statement + +import time +import socket +import errno +import struct +import logging +import binascii +import traceback +import random +import platform +import threading + +from shadowsocks import encrypt, obfs, eventloop, shell, common +from shadowsocks.common import pre_parse_header, parse_header, IPNetwork, PortRange + +# we clear at most TIMEOUTS_CLEAN_SIZE timeouts each time +TIMEOUTS_CLEAN_SIZE = 512 + +MSG_FASTOPEN = 0x20000000 + +# SOCKS command definition +CMD_CONNECT = 1 +CMD_BIND = 2 +CMD_UDP_ASSOCIATE = 3 + +# for each opening port, we have a TCP Relay + +# for each connection, we have a TCP Relay Handler to handle the connection + +# for each handler, we have 2 sockets: +# local: connected to the client +# remote: connected to remote server + +# for each handler, it could be at one of several stages: + +# as sslocal: +# stage 0 SOCKS hello received from local, send hello to local +# stage 1 addr received from local, query DNS for remote +# stage 2 UDP assoc +# stage 3 DNS resolved, connect to remote +# stage 4 still connecting, more data from local received +# stage 5 remote connected, piping local and remote + +# as ssserver: +# stage 0 just jump to stage 1 +# stage 1 addr received from local, query DNS for remote +# stage 3 DNS resolved, connect to remote +# stage 4 still connecting, more data from local received +# stage 5 remote connected, piping local and remote + +STAGE_INIT = 0 +STAGE_ADDR = 1 +STAGE_UDP_ASSOC = 2 +STAGE_DNS = 3 +STAGE_CONNECTING = 4 +STAGE_STREAM = 5 +STAGE_DESTROYED = -1 + +# for each handler, we have 2 stream directions: +# upstream: from client to server direction +# read local and write to remote +# downstream: from server to client direction +# read remote and write to local + +STREAM_UP = 0 +STREAM_DOWN = 1 + +# for each stream, it's waiting for reading, or writing, or both +WAIT_STATUS_INIT = 0 +WAIT_STATUS_READING = 1 +WAIT_STATUS_WRITING = 2 
class SpeedTester(object):
    """Leaky-bucket transfer-rate meter used for per-connection speed caps.

    *max_speed* is given in KB/s (0 disables limiting).  Bytes are added
    to a bucket that drains at max_speed bytes per second; the connection
    is considered over its cap while more than one second's worth of
    bytes remains in the bucket.
    """

    def __init__(self, max_speed=0):
        self.max_speed = max_speed * 1024  # KB/s -> bytes/s
        self.last_time = time.time()
        self.sum_len = 0  # bytes currently in the bucket

    def _drain(self):
        # Shared by add()/isExceed(): leak the bytes allowed since the
        # last update out of the bucket (previously duplicated inline).
        cur_t = time.time()
        self.sum_len -= (cur_t - self.last_time) * self.max_speed
        if self.sum_len < 0:
            self.sum_len = 0
        self.last_time = cur_t

    def add(self, data_len):
        """Record *data_len* transferred bytes (no-op when unlimited)."""
        if self.max_speed > 0:
            self._drain()
            self.sum_len += data_len

    def isExceed(self):
        """Return True while the recent transfer rate exceeds the cap."""
        if self.max_speed > 0:
            self._drain()
            return self.sum_len >= self.max_speed
        return False
BUF_SIZE - self._overhead + + server_info = obfs.server_info(server.obfs_data) + server_info.host = config['server'] + server_info.port = server._listen_port + #server_info.users = server.server_users + #server_info.update_user_func = self._update_user + server_info.client = self._client_address[0] + server_info.client_port = self._client_address[1] + server_info.protocol_param = '' + server_info.obfs_param = config['obfs_param'] + + server_info.iv = self._encryptor.cipher_iv + server_info.recv_iv = b'' + server_info.key_str = common.to_bytes(config['password']) + server_info.key = self._encryptor.key + server_info.head_len = 30 + server_info.tcp_mss = self._tcp_mss + server_info.buffer_size = self._recv_buffer_size + server_info.overhead = self._overhead + self._obfs.set_server_info(server_info) + + server_info = obfs.server_info(server.protocol_data) + server_info.host = config['server'] + server_info.port = server._listen_port + if 'users_table' in config: + server_info.users = server.multi_user_table + else: + server_info.users = {} + server_info.update_user_func = self._update_user + server_info.is_multi_user = config["is_multi_user"] + server_info.client = self._client_address[0] + server_info.client_port = self._client_address[1] + server_info.protocol_param = config['protocol_param'] + server_info.obfs_param = '' + server_info.iv = self._encryptor.cipher_iv + server_info.recv_iv = b'' + server_info.key_str = common.to_bytes(config['password']) + server_info.key = self._encryptor.key + server_info.head_len = 30 + server_info.tcp_mss = self._tcp_mss + server_info.buffer_size = self._recv_buffer_size + server_info.overhead = self._overhead + self._protocol.set_server_info(server_info) + + self._redir_list = config.get('redirect', ["*#0.0.0.0:0"]) + self._is_redirect = False + self._bind = config.get('out_bind', '') + self._bindv6 = config.get('out_bindv6', '') + self._ignore_bind_list = config.get('ignore_bind', []) + + self._fastopen_connected = False + 
self._data_to_write_to_local = [] + self._data_to_write_to_remote = [] + self._udp_data_send_buffer = b'' + self._upstream_status = WAIT_STATUS_READING + self._downstream_status = WAIT_STATUS_INIT + self._remote_address = None + + self._header_buf = [] + if is_local: + self._chosen_server = self._get_a_server() + + self.last_activity = 0 + self._update_activity() + self._server.add_connection(1) + self._server.stat_add(self._client_address[0], 1) + + self._add_ref = 1 + + self._recv_u_max_size = BUF_SIZE + self._recv_d_max_size = BUF_SIZE + self._recv_pack_id = 0 + self._udp_send_pack_id = 0 + self._udpv6_send_pack_id = 0 + + local_sock.setblocking(False) + local_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1) + self._local_sock_fd = local_sock.fileno() + fd_to_handlers[self._local_sock_fd] = self + loop.add(local_sock, eventloop.POLL_IN | eventloop.POLL_ERR, self._server) + self._stage = STAGE_INIT + + def __hash__(self): + # default __hash__ is id / 16 + # we want to eliminate collisions + return id(self) + + @property + def remote_address(self): + return self._remote_address + + def _get_a_server(self): + server = self._config['server'] + server_port = self._config['server_port'] + if isinstance(server_port, list): + server_port = random.choice(server_port) + if isinstance(server, list): + server = random.choice(server) + logging.debug('chosen server: %s:%d', server, server_port) + return server, server_port + + def _update_tcp_mss(self, local_sock): + self._tcp_mss = TCP_MSS + try: + tcp_mss = local_sock.getsockopt(socket.SOL_TCP, socket.TCP_MAXSEG) + if tcp_mss > 500 and tcp_mss <= 1500: + self._tcp_mss = tcp_mss + logging.debug("TCP MSS = %d" % (self._tcp_mss,)) + except: + pass + + def _create_encryptor(self, config): + try: + self._encryptor = encrypt.Encryptor(config['password'], + config['method']) + return True + except Exception: + self._stage = STAGE_DESTROYED + logging.error('create encryptor fail at port %d', self._server._listen_port) + 
traceback.print_exc() + + def _update_user(self, user): + if self._current_user_id == 0: + self._current_user_id = int(user) + self.mu_reset_time = self._server.mu_reset_time[self._current_user_id] + if self._current_user_id not in self._server.mu_server_transfer_ul: + self._server.mu_server_transfer_ul[self._current_user_id] = 0 + if self._current_user_id not in self._server.mu_server_transfer_dl: + self._server.mu_server_transfer_dl[self._current_user_id] = 0 + if self._current_user_id not in self._server.mu_connected_iplist: + self._server.mu_connected_iplist[self._current_user_id] = [] + if self._current_user_id not in self._server.mu_detect_log_list: + self._server.mu_detect_log_list[self._current_user_id] = [] + + def _update_activity(self, data_len=0): + # tell the TCP Relay we have activities recently + # else it will think we are inactive and timed out + self._server.update_activity(self, data_len) + + def _update_stream(self, stream, status): + # update a stream to a new waiting status + + # check if status is changed + # only update if dirty + dirty = False + if stream == STREAM_DOWN: + if self._downstream_status != status: + self._downstream_status = status + dirty = True + elif stream == STREAM_UP: + if self._upstream_status != status: + self._upstream_status = status + dirty = True + if dirty: + if self._local_sock: + event = eventloop.POLL_ERR + if self._downstream_status & WAIT_STATUS_WRITING: + event |= eventloop.POLL_OUT + if self._upstream_status & WAIT_STATUS_READING: + event |= eventloop.POLL_IN + self._loop.modify(self._local_sock, event) + if self._remote_sock: + event = eventloop.POLL_ERR + if self._downstream_status & WAIT_STATUS_READING: + event |= eventloop.POLL_IN + if self._upstream_status & WAIT_STATUS_WRITING: + event |= eventloop.POLL_OUT + self._loop.modify(self._remote_sock, event) + if self._remote_sock_v6: + self._loop.modify(self._remote_sock_v6, event) + + def _write_to_sock(self, data, sock): + # write data to sock + # if only 
some of the data are written, put remaining in the buffer + # and update the stream to wait for writing + + if self._config['is_multi_user'] != 0 and self._current_user_id != 0: + if self._current_user_id not in self._server.multi_user_table: + self.destroy() + return False + if self._server.mu_reset_time[self._current_user_id] > self.mu_reset_time: + self.destroy() + return False + + if not sock: + return False + uncomplete = False + if self._remote_udp and sock == self._remote_sock: + try: + self._udp_data_send_buffer += data + #logging.info('UDP over TCP sendto %d %s' % (len(data), binascii.hexlify(data))) + while len(self._udp_data_send_buffer) > 6: + length = struct.unpack( + '>H', self._udp_data_send_buffer[:2])[0] + + if length > len(self._udp_data_send_buffer): + break + + data = self._udp_data_send_buffer[:length] + + self._udp_data_send_buffer = self._udp_data_send_buffer[ + length:] + + frag = common.ord(data[2]) + if frag != 0: + logging.warn( + 'drop a message since frag is %d' % + (frag,)) + continue + else: + data = data[3:] + header_result = parse_header(data) + if header_result is None: + continue + + connecttype, addrtype, dest_addr, dest_port, header_length = header_result + if (addrtype & 7) == 3: + af = common.is_ip(dest_addr) + if af == False: + handler = common.UDPAsyncDNSHandler(data[header_length:]) + handler.resolve(self._dns_resolver, (dest_addr, dest_port), self._handle_server_dns_resolved) + else: + return self._handle_server_dns_resolved("", (dest_addr, dest_port), dest_addr, data[header_length:]) + else: + return self._handle_server_dns_resolved("", (dest_addr, dest_port), dest_addr, data[header_length:]) + + except Exception as e: + #trace = traceback.format_exc() + # logging.error(trace) + error_no = eventloop.errno_from_exception(e) + if error_no in (errno.EAGAIN, errno.EINPROGRESS, + errno.EWOULDBLOCK): + uncomplete = True + else: + shell.print_exception(e) + logging.error( + "exception from %s:%d" % + (self._client_address[0], 
self._client_address[1])) + self.destroy() + return False + return True + else: + try: + if self._encrypt_correct or self._is_relay: + if sock == self._remote_sock: + self._server.add_transfer_u( + self._current_user_id, len(data)) + self._update_activity(len(data)) + if data: + l = len(data) + s = sock.send(data) + if s < l: + data = data[s:] + uncomplete = True + else: + return + except (OSError, IOError) as e: + error_no = eventloop.errno_from_exception(e) + if error_no in (errno.EAGAIN, errno.EINPROGRESS, + errno.EWOULDBLOCK): + uncomplete = True + else: + # traceback.print_exc() + shell.print_exception(e) + logging.error( + "exception from %s:%d" % + (self._client_address[0], self._client_address[1])) + self.destroy() + return False + except Exception as e: + shell.print_exception(e) + logging.error( + "exception from %s:%d" % + (self._client_address[0], + self._client_address[1])) + self.destroy() + return False + if uncomplete: + if sock == self._local_sock: + self._data_to_write_to_local.append(data) + self._update_stream(STREAM_DOWN, WAIT_STATUS_WRITING) + elif sock == self._remote_sock: + self._data_to_write_to_remote.append(data) + self._update_stream(STREAM_UP, WAIT_STATUS_WRITING) + else: + logging.error( + 'write_all_to_sock:unknown socket from %s:%d' % + (self._client_address[0], self._client_address[1])) + else: + if sock == self._local_sock: + self._update_stream(STREAM_DOWN, WAIT_STATUS_READING) + elif sock == self._remote_sock: + self._update_stream(STREAM_UP, WAIT_STATUS_READING) + else: + logging.error( + 'write_all_to_sock:unknown socket from %s:%d' % + (self._client_address[0], self._client_address[1])) + return True + + def _handle_server_dns_resolved(self, error, remote_addr, server_addr, data): + if error: + return + try: + addrs = socket.getaddrinfo(server_addr, remote_addr[1], 0, socket.SOCK_DGRAM, socket.SOL_UDP) + if not addrs: # drop + return + af, socktype, proto, canonname, sa = addrs[0] + if af == socket.AF_INET6: + 
self._remote_sock_v6.sendto(data, (server_addr, remote_addr[1])) + if self._udpv6_send_pack_id == 0: + addr, port = self._remote_sock_v6.getsockname()[:2] + common.connect_log('UDPv6 sendto %s(%s):%d from %s:%d by user %d' % + (common.to_str(remote_addr[0]), common.to_str(server_addr), remote_addr[1], addr, port, self._current_user_id)) + self._udpv6_send_pack_id += 1 + else: + self._remote_sock.sendto(data, (server_addr, remote_addr[1])) + if self._udp_send_pack_id == 0: + addr, port = self._remote_sock.getsockname()[:2] + common.connect_log('UDP sendto %s(%s):%d from %s:%d by user %d' % + (common.to_str(remote_addr[0]), common.to_str(server_addr), remote_addr[1], addr, port, self._current_user_id)) + self._udp_send_pack_id += 1 + return True + except Exception as e: + shell.print_exception(e) + logging.error("exception from %s:%d" % (self._client_address[0], self._client_address[1])) + + def _get_redirect_host(self, client_address, ogn_data): + host_list = self._redir_list or ["*#0.0.0.0:0"] + + if not isinstance(host_list, list): + host_list = [host_list] + + items_sum = common.to_str(host_list[0]).rsplit('#', 1) + if len(items_sum) < 2: + hash_code = binascii.crc32(ogn_data) + addrs = socket.getaddrinfo( + client_address[0], + client_address[1], + 0, + socket.SOCK_STREAM, + socket.SOL_TCP) + af, socktype, proto, canonname, sa = addrs[0] + address_bytes = common.inet_pton(af, sa[0]) + if af == socket.AF_INET6: + addr = struct.unpack('>Q', address_bytes[8:])[0] + elif af == socket.AF_INET: + addr = struct.unpack('>I', address_bytes)[0] + else: + addr = 0 + + host_port = [] + match_port = False + for host in host_list: + items = common.to_str(host).rsplit(':', 1) + if len(items) > 1: + try: + port = int(items[1]) + if port == self._server._listen_port: + match_port = True + host_port.append((items[0], port)) + except: + pass + else: + host_port.append((host, 80)) + + if match_port: + last_host_port = host_port + host_port = [] + for host in last_host_port: + if 
host[1] == self._server._listen_port: + host_port.append(host) + + return host_port[ + ((hash_code & 0xffffffff) + addr) % + len(host_port)] + + else: + host_port = [] + for host in host_list: + items_sum = common.to_str(host).rsplit('#', 1) + items_match = common.to_str(items_sum[0]).rsplit(':', 1) + items = common.to_str(items_sum[1]).rsplit(':', 1) + if len(items_match) > 1: + if items_match[1] != "*": + try: + if self._server._listen_port != int(items_match[1]) and int(items_match[1]) != 0: + continue + except: + pass + + if items_match[0] != "*" and common.match_regex( + items_match[0], ogn_data) == False: + continue + if len(items) > 1: + try: + port = int(items[1]) + return (items[0], port) + except: + pass + else: + return (items[0], 80) + + return ("0.0.0.0", 0) + + def _get_relay_host(self, client_address, ogn_data): + for id in self._relay_rules: + if self._relay_rules[id]['port'] == 0: + port = self._server._listen_port + else: + port = self._relay_rules[id]['port'] + return (self._relay_rules[id]['dist_ip'], int(port)) + return (None, None) + + def _handel_normal_relay(self, client_address, ogn_data): + host, port = self._get_relay_host(client_address, ogn_data) + self._encrypt_correct = False + if port is None: + raise Exception('can not parse header') + data = b"\x03" + common.to_bytes(common.chr(len(host))) + \ + common.to_bytes(host) + struct.pack('>H', port) + self._is_relay = True + return data + ogn_data + + def _get_mu_relay_host(self, ogn_data): + + if self._current_user_id == 0: + return (None, None) + + for id in self._relay_rules: + if (self._relay_rules[id]['user_id'] == 0 and self._current_user_id != 0) or self._relay_rules[ + id]['user_id'] == self._current_user_id: + has_higher_priority = False + for priority_id in self._relay_rules: + if ( + ( + self._relay_rules[priority_id]['priority'] > self._relay_rules[id]['priority'] and self._relay_rules[id]['id'] != self._relay_rules[priority_id]['id']) or ( + 
self._relay_rules[priority_id]['priority'] == self._relay_rules[id]['priority'] and self._relay_rules[id]['id'] > self._relay_rules[priority_id]['id'])) and ( + self._relay_rules[priority_id]['user_id'] == self._current_user_id or self._relay_rules[priority_id]['user_id'] == 0): + has_higher_priority = True + continue + + if has_higher_priority: + continue + + if self._relay_rules[id]['dist_ip'] == '0.0.0.0': + continue + + if self._relay_rules[id]['port'] == 0: + port = self._server._listen_port + else: + port = self._relay_rules[id]['port'] + + return (self._relay_rules[id]['dist_ip'], int(port)) + return (None, None) + + def _handel_mu_relay(self, client_address, ogn_data): + host, port = self._get_mu_relay_host(ogn_data) + if host is None: + return ogn_data + self._encrypt_correct = False + if port is None: + raise Exception('can not parse header') + data = b"\x03" + common.to_bytes(common.chr(len(host))) + \ + common.to_bytes(host) + struct.pack('>H', port) + self._is_relay = True + return data + ogn_data + + def _handel_protocol_error(self, client_address, ogn_data): + if self._config['redirect_verbose']: + logging.warn( + "Protocol ERROR, TCP ogn data %s from %s:%d via port %d" % + (binascii.hexlify(ogn_data), + client_address[0], + client_address[1], + self._server._listen_port)) + if client_address[0] not in self._server.wrong_iplist and client_address[ + 0] != 0 and self._server.is_cleaning_wrong_iplist == False: + self._server.wrong_iplist[client_address[0]] = time.time() + self._encrypt_correct = False + # create redirect or disconnect by hash code + host, port = self._get_redirect_host(client_address, ogn_data) + if port == 0: + raise Exception('can not parse header') + data = b"\x03" + common.to_bytes(common.chr(len(host))) + \ + common.to_bytes(host) + struct.pack('>H', port) + if self._config['redirect_verbose']: + logging.warn( + "TCP data redir %s:%d %s" % + (host, port, binascii.hexlify(data))) + self._is_redirect = True + return data + ogn_data 
+ + def _handel_mu_protocol_error(self, client_address, ogn_data): + if self._config['redirect_verbose']: + logging.warn( + "Protocol ERROR, TCP ogn data %s from %s:%d via port %d" % + (binascii.hexlify(ogn_data), + client_address[0], + client_address[1], + self._server._listen_port)) + if client_address[0] not in self._server.wrong_iplist and client_address[ + 0] != 0 and self._server.is_cleaning_wrong_iplist == False: + self._server.wrong_iplist[client_address[0]] = time.time() + self._encrypt_correct = False + # create redirect or disconnect by hash code + host, port = self._get_redirect_host(client_address, ogn_data) + if port == 0: + raise Exception('can not parse header') + data = b"\x03" + common.to_bytes(common.chr(len(host))) + \ + common.to_bytes(host) + struct.pack('>H', port) + if self._config['redirect_verbose']: + logging.warn( + "TCP data mu redir %s:%d %s" % + (host, port, binascii.hexlify(data))) + self._is_redirect = True + return data + ogn_data + + def _handle_stage_connecting(self, data): + if self._is_local: + if self._encryptor is not None: + data = self._protocol.client_pre_encrypt(data) + data = self._encryptor.encrypt(data) + data = self._obfs.client_encode(data) + if data: + self._data_to_write_to_remote.append(data) + if self._is_local and not self._fastopen_connected and \ + self._config['fast_open']: + # for sslocal and fastopen, we basically wait for data and use + # sendto to connect + try: + # only connect once + self._fastopen_connected = True + remote_sock = \ + self._create_remote_socket(self._chosen_server[0], + self._chosen_server[1]) + self._loop.add(remote_sock, eventloop.POLL_ERR, self._server) + data = b''.join(self._data_to_write_to_remote) + l = len(data) + s = remote_sock.sendto(data, MSG_FASTOPEN, self._chosen_server) + if s < l: + data = data[s:] + self._data_to_write_to_remote = [data] + else: + self._data_to_write_to_remote = [] + self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING) + except (OSError, IOError) as 
e: + if eventloop.errno_from_exception(e) == errno.EINPROGRESS: + # in this case data is not sent at all + self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING) + elif eventloop.errno_from_exception(e) == errno.ENOTCONN: + logging.error('fast open not supported on this OS') + self._config['fast_open'] = False + self.destroy() + else: + shell.print_exception(e) + if self._config['verbose']: + traceback.print_exc() + logging.error( + "exception from %s:%d" % + (self._client_address[0], self._client_address[1])) + self.destroy() + + def _get_head_size(self, buf, def_value): + if len(buf) < 2: + return def_value + head_type = common.ord(buf[0]) & 0xF + if head_type == 1: + return 7 + if head_type == 4: + return 19 + if head_type == 3: + return 4 + common.ord(buf[1]) + return def_value + + def _handle_stage_addr(self, ogn_data, data): + is_error = False + try: + if self._is_local: + cmd = common.ord(data[1]) + if cmd == CMD_UDP_ASSOCIATE: + logging.debug('UDP associate') + if self._local_sock.family == socket.AF_INET6: + header = b'\x05\x00\x00\x04' + else: + header = b'\x05\x00\x00\x01' + addr, port = self._local_sock.getsockname()[:2] + addr_to_send = socket.inet_pton(self._local_sock.family, + addr) + port_to_send = struct.pack('>H', port) + self._write_to_sock(header + addr_to_send + port_to_send, + self._local_sock) + self._stage = STAGE_UDP_ASSOC + # just wait for the client to disconnect + return + elif cmd == CMD_CONNECT: + # just trim VER CMD RSV + data = data[3:] + else: + logging.error('invalid command %d', cmd) + self.destroy() + return + + before_parse_data = data + if self._is_local: + header_result = parse_header(data) + else: + if self._server._config["is_multi_user"] == 0: + if self._relay_rules != {}: + is_relay = True + else: + is_relay = False + else: + is_relay = self.is_match_relay_rule_mu() + if self._relay_rules != {} and is_relay: + if self._server._config["is_multi_user"] == 0: + data = self._handel_normal_relay( + self._client_address, 
ogn_data) + else: + data = self._handel_mu_relay( + self._client_address, ogn_data) + header_result = parse_header(data) + if header_result is not None: + try: + common.to_str(header_result[2]) + except Exception as e: + header_result = None + if header_result is None: + is_error = True + if self._server._config["is_multi_user"] == 0: + data = self._handel_protocol_error( + self._client_address, ogn_data) + else: + data = self._handel_mu_protocol_error( + self._client_address, ogn_data) + header_result = parse_header(data) + else: + data = pre_parse_header(data) + if data is None: + is_error = True + data = self._handel_protocol_error( + self._client_address, ogn_data) + header_result = parse_header(data) + if header_result is not None: + try: + common.to_str(header_result[2]) + except Exception as e: + header_result = None + if header_result is None: + is_error = True + data = self._handel_protocol_error( + self._client_address, ogn_data) + header_result = parse_header(data) + self._overhead = self._obfs.get_overhead(self._is_local) + self._protocol.get_overhead(self._is_local) + self._recv_buffer_size = BUF_SIZE - self._overhead + server_info = self._obfs.get_server_info() + server_info.buffer_size = self._recv_buffer_size + server_info = self._protocol.get_server_info() + server_info.buffer_size = self._recv_buffer_size + connecttype, addrtype, remote_addr, remote_port, header_length = header_result + if not self._server._connect_hex_data: + common.connect_log( + '%s connecting %s:%d from %s:%d via port %d' % + ((connecttype == 0) and 'TCP' or 'UDP', + common.to_str(remote_addr), + remote_port, + self._client_address[0], + self._client_address[1], + self._server._listen_port)) + if connecttype != 0: + pass + #common.connect_log('UDP over TCP by user %d' % + # (self._user_id, )) + else: + common.connect_log( + '%s connecting %s:%d from %s:%d via port %d,hex data : %s' % + ((connecttype == 0) and 'TCP' or 'UDP', + common.to_str(remote_addr), + remote_port, + 
self._client_address[0], + self._client_address[1], + self._server._listen_port, + binascii.hexlify(data))) + if not is_error: + if not self._server.is_pushing_detect_text_list: + for id in self._server.detect_text_list: + if common.match_regex( + self._server.detect_text_list[id]['regex'], str(data)): + if self._config[ + 'is_multi_user'] != 0 and self._current_user_id != 0: + if self._server.is_cleaning_mu_detect_log_list == False and id not in self._server.mu_detect_log_list[ + self._current_user_id]: + self._server.mu_detect_log_list[ + self._current_user_id].append(id) + else: + if self._server.is_cleaning_detect_log == False and id not in self._server.detect_log_list: + self._server.detect_log_list.append(id) + self._handle_detect_rule_match(remote_port) + raise Exception( + 'This connection match the regex: id:%d was reject,regex: %s ,%s connecting %s:%d from %s:%d via port %d' % + (self._server.detect_text_list[id]['id'], + self._server.detect_text_list[id]['regex'], + (connecttype == 0) and 'TCP' or 'UDP', + common.to_str(remote_addr), + remote_port, + self._client_address[0], + self._client_address[1], + self._server._listen_port)) + if not self._server.is_pushing_detect_hex_list: + for id in self._server.detect_hex_list: + if common.match_regex( + self._server.detect_hex_list[id]['regex'], + binascii.hexlify(data)): + if self._config[ + 'is_multi_user'] != 0 and self._current_user_id != 0: + if self._server.is_cleaning_mu_detect_log_list == False and id not in self._server.mu_detect_log_list[ + self._current_user_id]: + self._server.mu_detect_log_list[ + self._current_user_id].append(id) + else: + if self._server.is_cleaning_detect_log == False and id not in self._server.detect_log_list: + self._server.detect_log_list.append(id) + self._handle_detect_rule_match(remote_port) + raise Exception( + 'This connection match the regex: id:%d was reject,regex: %s ,connecting %s:%d from %s:%d via port %d' % + (self._server.detect_hex_list[id]['id'], + 
self._server.detect_hex_list[id]['regex'], + common.to_str(remote_addr), + remote_port, + self._client_address[0], + self._client_address[1], + self._server._listen_port)) + if self._config['is_multi_user'] == 0 and common.getRealIp(self._client_address[0]) not in self._server.connected_iplist and self._client_address[0] != 0 and self._server.is_cleaning_connected_iplist == False: + self._server.connected_iplist.append(common.getRealIp(self._client_address[0])) + + if self._config[ + 'is_multi_user'] != 0 and self._current_user_id != 0: + if common.getRealIp(self._client_address[0]) not in self._server.mu_connected_iplist[ + self._current_user_id] and self._client_address[0] != 0: + self._server.mu_connected_iplist[self._current_user_id].append(common.getRealIp(self._client_address[0])) + + if self._client_address[0] in self._server.wrong_iplist and self._client_address[ + 0] != 0 and self._server.is_cleaning_wrong_iplist == False: + del self._server.wrong_iplist[self._client_address[0]] + + self._remote_address = (common.to_str(remote_addr), remote_port) + self._remote_udp = (connecttype != 0) + # pause reading + self._update_stream(STREAM_UP, WAIT_STATUS_WRITING) + self._stage = STAGE_DNS + if self._is_local: + # forward address to remote + self._write_to_sock((b'\x05\x00\x00\x01' + b'\x00\x00\x00\x00\x10\x10'), + self._local_sock) + head_len = self._get_head_size(data, 30) + self._obfs.obfs.server_info.head_len = head_len + self._protocol.obfs.server_info.head_len = head_len + if self._encryptor is not None: + data = self._protocol.client_pre_encrypt(data) + data_to_send = self._encryptor.encrypt(data) + data_to_send = self._obfs.client_encode(data_to_send) + if data_to_send: + self._data_to_write_to_remote.append(data_to_send) + # notice here may go into _handle_dns_resolved directly + self._dns_resolver.resolve(self._chosen_server[0], + self._handle_dns_resolved) + else: + if len(data) > header_length: + if self._header_buf != []: + is_relay = 
self.is_match_relay_rule_mu() + if is_relay: + self._write_to_sock( + self._header_buf, self._remote_sock) + self._header_buf = [] + self._data_to_write_to_remote.append(data[header_length:]) + # notice here may go into _handle_dns_resolved directly + self._dns_resolver.resolve(remote_addr, + self._handle_dns_resolved) + except Exception as e: + self._log_error(e) + if self._config['verbose']: + traceback.print_exc() + self.destroy() + + def _socket_bind_addr(self, sock, af): + bind_addr = '' + if self._bind and af == socket.AF_INET: + bind_addr = self._bind + elif self._bindv6 and af == socket.AF_INET6: + bind_addr = self._bindv6 + else: + bind_addr = self._accept_address[0] + + bind_addr = bind_addr.replace("::ffff:", "") + if bind_addr in self._ignore_bind_list: + bind_addr = None + + if self._is_relay: + bind_addr = None + + if bind_addr: + local_addrs = socket.getaddrinfo( + bind_addr, 0, 0, socket.SOCK_STREAM, socket.SOL_TCP) + if local_addrs[0][0] == af: + logging.debug("bind %s" % (bind_addr,)) + sock.bind((bind_addr, 0)) + + def _create_remote_socket(self, ip, port): + if self._remote_udp: + addrs_v6 = socket.getaddrinfo( + "::", 0, 0, socket.SOCK_DGRAM, socket.SOL_UDP) + addrs = socket.getaddrinfo( + "0.0.0.0", 0, 0, socket.SOCK_DGRAM, socket.SOL_UDP) + else: + addrs = socket.getaddrinfo( + ip, port, 0, socket.SOCK_STREAM, socket.SOL_TCP) + if len(addrs) == 0: + raise Exception("getaddrinfo failed for %s:%d" % (ip, port)) + af, socktype, proto, canonname, sa = addrs[0] + + if not self._remote_udp and self._is_redirect == False: + if self._server._config[ + "is_multi_user"] != 0 and self._current_user_id != 0: + if self._server.multi_user_table[ + self._current_user_id]['_forbidden_iplist']: + if common.to_str(sa[0]) in self._server.multi_user_table[ + self._current_user_id]['_forbidden_iplist']: + if self._remote_address: + raise Exception( + 'IP %s is in forbidden list, when connect to %s:%d via port %d' % + (common.to_str( + sa[0]), + 
self._remote_address[0], + self._remote_address[1], + self._server.multi_user_table[ + self._current_user_id]['port'])) + raise Exception('IP %s is in forbidden list, reject' % + common.to_str(sa[0])) + if self._server.multi_user_table[ + self._current_user_id]['_forbidden_portset']: + if sa[1] in self._server.multi_user_table[ + self._current_user_id]['_forbidden_portset']: + if self._remote_address: + raise Exception( + 'Port %d is in forbidden list, when connect to %s:%d via port %d' % + (sa[1], + self._remote_address[0], + self._remote_address[1], + self._server.multi_user_table[ + self._current_user_id]['port'])) + raise Exception( + 'Port %d is in forbidden list, reject' % + sa[1]) + if self._server.multi_user_table[ + self._current_user_id]['_disconnect_ipset']: + if self._client_address[0] in self._server.multi_user_table[ + self._current_user_id]['_disconnect_ipset']: + if self._remote_address: + raise Exception( + 'IP %s is in disconnect list, when connect to %s:%d via port %d' % + (self._client_address[0], + self._remote_address[0], + self._remote_address[1], + self._server.multi_user_table[ + self._current_user_id]['port'])) + raise Exception('IP %s is in disconnect list, reject' % + (self._client_address[0])) + else: + if self._server._forbidden_iplist: + if common.to_str(sa[0]) in self._server._forbidden_iplist: + if self._remote_address: + raise Exception( + 'IP %s is in forbidden list, when connect to %s:%d via port %d' % + (self._client_address[0], + self._remote_address[0], + self._remote_address[1], + self._server._listen_port)) + raise Exception('IP %s is in forbidden list, reject' % + common.to_str(sa[0])) + if self._server._forbidden_portset: + if sa[1] in self._server._forbidden_portset: + if self._remote_address: + raise Exception( + 'Port %d is in forbidden list, when connect to %s:%d via port %d' % + (sa[1], + self._remote_address[0], + self._remote_address[1], + self._server._listen_port)) + raise Exception( + 'Port %d is in forbidden 
list, reject' % + sa[1]) + if self._server._disconnect_ipset: + if self._client_address[0] in self._server._disconnect_ipset: + if self._remote_address: + raise Exception( + 'IP %s is in disconnect list, when connect to %s:%d via port %d' % + (self._client_address[0], + self._remote_address[0], + self._remote_address[1], + self._server._listen_port)) + raise Exception('IP %s is in disconnect list, reject' % + self._client_address[0]) + remote_sock = socket.socket(af, socktype, proto) + self._remote_sock = remote_sock + self._remote_sock_fd = remote_sock.fileno() + self._fd_to_handlers[self._remote_sock_fd] = self + + if self._remote_udp: + af, socktype, proto, canonname, sa = addrs_v6[0] + remote_sock_v6 = socket.socket(af, socktype, proto) + self._remote_sock_v6 = remote_sock_v6 + self._remotev6_sock_fd = remote_sock_v6.fileno() + self._fd_to_handlers[self._remotev6_sock_fd] = self + + remote_sock.setblocking(False) + if self._remote_udp: + remote_sock_v6.setblocking(False) + + if not self._is_local: + self._socket_bind_addr(remote_sock, af) + self._socket_bind_addr(remote_sock_v6, af) + else: + remote_sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1) + if not self._is_local: + self._socket_bind_addr(remote_sock, af) + return remote_sock + + def _handle_dns_resolved(self, result, error): + if error: + self._log_error(error) + self.destroy() + return + if result: + ip = result[1] + if ip: + + try: + self._stage = STAGE_CONNECTING + remote_addr = ip + if self._is_local: + remote_port = self._chosen_server[1] + else: + remote_port = self._remote_address[1] + + if self._is_local and self._config['fast_open']: + # for fastopen: + # wait for more data to arrive and send them in one SYN + self._stage = STAGE_CONNECTING + # we don't have to wait for remote since it's not + # created + self._update_stream(STREAM_UP, WAIT_STATUS_READING) + # TODO when there is already data in this packet + else: + # else do connect + remote_sock = 
self._create_remote_socket(remote_addr, + remote_port) + if self._remote_udp: + self._loop.add(remote_sock, + eventloop.POLL_IN, + self._server) + if self._remote_sock_v6: + self._loop.add(self._remote_sock_v6, + eventloop.POLL_IN, + self._server) + else: + try: + remote_sock.connect( + (remote_addr, remote_port)) + except (OSError, IOError) as e: + if eventloop.errno_from_exception(e) in ( + errno.EINPROGRESS, errno.EWOULDBLOCK): + pass # always goto here + else: + raise e + + addr, port = self._remote_sock.getsockname()[:2] + common.connect_log('TCP connecting %s(%s):%d from %s:%d by user %d' % + (common.to_str(self._remote_address[0]), common.to_str(remote_addr), remote_port, addr, port, self._current_user_id)) + + self._loop.add(remote_sock, + eventloop.POLL_ERR | eventloop.POLL_OUT, + self._server) + self._stage = STAGE_CONNECTING + self._update_stream(STREAM_UP, WAIT_STATUS_READWRITING) + self._update_stream(STREAM_DOWN, WAIT_STATUS_READING) + if self._remote_udp: + while self._data_to_write_to_remote: + data = self._data_to_write_to_remote[0] + del self._data_to_write_to_remote[0] + self._write_to_sock(data, self._remote_sock) + return + except Exception as e: + shell.print_exception(e) + if self._config['verbose']: + traceback.print_exc() + logging.error( + "exception from %s:%d" % + (self._client_address[0], self._client_address[1])) + self.destroy() + + def is_match_relay_rule_mu(self): + host, port = self._get_mu_relay_host('') + if host is None: + return False + else: + return True + + def _get_read_size(self, sock, recv_buffer_size, up): + if self._overhead == 0: + return recv_buffer_size + buffer_size = len(sock.recv(recv_buffer_size, socket.MSG_PEEK)) + frame_size = self._tcp_mss - self._overhead + if up: + buffer_size = min(buffer_size, self._recv_u_max_size) + self._recv_u_max_size = min(self._recv_u_max_size + frame_size, BUF_SIZE) + else: + buffer_size = min(buffer_size, self._recv_d_max_size) + self._recv_d_max_size = min(self._recv_d_max_size + 
frame_size, BUF_SIZE) + if buffer_size == recv_buffer_size: + return buffer_size + if buffer_size > frame_size: + buffer_size = int(buffer_size / frame_size) * frame_size + return buffer_size + + def _handle_detect_rule_match(self, port): + if port == 80 and self._config['friendly_detect']: + backdata = b'HTTP/1.0 200 OK\r\nConnection: close\r\nContent-Type: text/html; charset=utf-8\r\n\r\n' + self._config['detect_block_html'] + backdata = self._protocol.server_pre_encrypt(backdata) + backdata = self._encryptor.encrypt(backdata) + backdata = self._obfs.server_encode(backdata) + self._write_to_sock(backdata, self._local_sock) + + def _on_local_read(self): + # handle all local read events and dispatch them to methods for + # each stage + try: + if not self._local_sock: + return + is_local = self._is_local + if is_local: + recv_buffer_size = self._get_read_size(self._local_sock, self._recv_buffer_size, True) + else: + recv_buffer_size = BUF_SIZE + is_Failed = False + data = None + try: + data = self._local_sock.recv(recv_buffer_size) + except (OSError, IOError) as e: + if eventloop.errno_from_exception(e) in \ + (errno.ETIMEDOUT, errno.EAGAIN, errno.EWOULDBLOCK): + return + if not data: + self.destroy() + return + + self._server.speed_tester_u.add(len(data)) + if self._current_user_id != 0 and self._server._config[ + "is_multi_user"] != 0: + self._server.mu_speed_tester_u[ + self._current_user_id].add(len(data)) + + ogn_data = data + + is_relay = self.is_match_relay_rule_mu() + if not is_local and ( + (self._server._config["is_multi_user"] == 0 and self._relay_rules == {}) or ( + self._server._config["is_multi_user"] != 0 and ( + (self._current_user_id == 0 or is_relay == False) or self._relay_rules == {}))): + if self._encryptor is not None: + if self._encrypt_correct: + host = '' + try: + obfs_decode = self._obfs.server_decode(data) + if self._stage == STAGE_INIT: + self._overhead = self._obfs.get_overhead(self._is_local) + 
self._protocol.get_overhead(self._is_local) + server_info = self._protocol.get_server_info() + server_info.overhead = self._overhead + except Exception as e: + shell.print_exception(e) + logging.error( + "exception from %s:%d" % + (self._client_address[0], self._client_address[1])) + self.destroy() + return + need_sendback = False + if obfs_decode[2]: + host_name = '' + if self._server._config[ + "is_multi_user"] == 1 and self._current_user_id == 0: + if self._server._config[ + "obfs"] == b"tls1.2_ticket_auth" or self._server._config[ + "obfs"] == b"tls1.2_ticket_fastauth": + if(len(obfs_decode) > 3): + host = obfs_decode[3] + ":" + str(self._server._listen_port) + need_sendback = True + if obfs_decode[1]: + if self._server._config[ + "is_multi_user"] == 1 and self._current_user_id == 0: + if self._server._config["obfs"] in [b"http_simple", b"http_post", b"simple_obfs_tls", b"simple_obfs_http"]: + if(len(obfs_decode) > 3): + host = obfs_decode[3] + if not self._protocol.obfs.server_info.recv_iv: + iv_len = len( + self._protocol.obfs.server_info.iv) + self._protocol.obfs.server_info.recv_iv = obfs_decode[ + 0][:iv_len] + try: + data = self._encryptor.decrypt(obfs_decode[0]) + except Exception as e: + logging.error( + "decrypt data failed, exception from %s:%d" % + (self._client_address[0], self._client_address[1])) + data = [0] + else: + data = obfs_decode[0] + + if self._server._config[ + "is_multi_user"] == 1 and self._current_user_id == 0: + try: + host_list = host.split(":", 2) + host_name = host_list[0] + if host_name in self._server.multi_user_host_table: + self._update_user( + self._server.multi_user_host_table[host_name]) + else: + logging.error( + 'The host:%s md5 is mismatch,so The connection has been rejected, when connect from %s:%d via port %d' % + (host_name, self._client_address[0], self._client_address[1], self._server._listen_port)) + is_Failed = True + except Exception as e: + logging.error( + 'The mu hostname is error,so The connection has been 
rejected, when connect from %s:%d via port %d' % + (self._client_address[0], self._client_address[1], self._server._listen_port)) + is_Failed = True + + try: + data, sendback = self._protocol.server_post_decrypt( + data) + + if self._server._config[ + "is_multi_user"] == 2 and self._current_user_id == 0 and data: + logging.error( + 'The port is multi user in single port only , but the key remote provided is error or empty, so The connection has been rejected, when connect from %s:%d via port %d' % + (self._client_address[0], self._client_address[1], self._server._listen_port)) + is_Failed = True + + if self._server._config[ + "is_multi_user"] == 2 and self._current_user_id == 0 and ogn_data: + self._header_buf = ogn_data[:] + + is_relay = self.is_match_relay_rule_mu() + + if not is_relay and need_sendback: + data_sendback = self._obfs.server_encode(b'') + try: + self._write_to_sock( + data_sendback, self._local_sock) + except Exception as e: + shell.print_exception(e) + if self._config['verbose']: + traceback.print_exc() + logging.error( + "exception from %s:%d" % + (self._client_address[0], self._client_address[1])) + self.destroy() + return + + if sendback and not is_relay: + backdata = self._protocol.server_pre_encrypt( + b'') + backdata = self._encryptor.encrypt(backdata) + backdata = self._obfs.server_encode(backdata) + try: + self._write_to_sock( + backdata, self._local_sock) + except Exception as e: + shell.print_exception(e) + if self._config['verbose']: + traceback.print_exc() + logging.error( + "exception from %s:%d" % + (self._client_address[0], self._client_address[1])) + self.destroy() + return + except Exception as e: + shell.print_exception(e) + logging.error( + "exception from %s:%d" % + (self._client_address[0], self._client_address[1])) + self.destroy() + + if is_Failed: + data = self._handel_mu_protocol_error( + self._client_address, ogn_data) + + if is_relay: + data = ogn_data + else: + return + if not data: + return + except Exception as e: + 
self._log_error(e) + if self._config['verbose']: + traceback.print_exc() + self.destroy() + if self._stage == STAGE_STREAM: + if self._is_local: + if self._encryptor is not None: + data = self._protocol.client_pre_encrypt(data) + data = self._encryptor.encrypt(data) + data = self._obfs.client_encode(data) + self._write_to_sock(data, self._remote_sock) + elif is_local and self._stage == STAGE_INIT: + # TODO check auth method + self._write_to_sock(b'\x05\00', self._local_sock) + self._stage = STAGE_ADDR + elif self._stage == STAGE_CONNECTING: + self._handle_stage_connecting(data) + elif (is_local and self._stage == STAGE_ADDR) or \ + (not is_local and self._stage == STAGE_INIT): + self._handle_stage_addr(ogn_data, data) + + def _on_remote_read(self, is_remote_sock): + if self._config['is_multi_user'] != 0 and self._current_user_id != 0: + if self._current_user_id not in self._server.multi_user_table: + self.destroy() + return + if self._server.mu_reset_time[self._current_user_id] > self.mu_reset_time: + self.destroy() + return + + # handle all remote read events + data = None + try: + if self._remote_udp: + if is_remote_sock: + data, addr = self._remote_sock.recvfrom(UDP_MAX_BUF_SIZE) + else: + data, addr = self._remote_sock_v6.recvfrom( + UDP_MAX_BUF_SIZE) + port = struct.pack('>H', addr[1]) + try: + ip = socket.inet_aton(addr[0]) + data = b'\x00\x01' + ip + port + data + except Exception as e: + ip = socket.inet_pton(socket.AF_INET6, addr[0]) + data = b'\x00\x04' + ip + port + data + size = len(data) + 2 + + data = struct.pack('>H', size) + data + #logging.info('UDP over TCP recvfrom %s:%d %d bytes to %s:%d' % (addr[0], addr[1], len(data), self._client_address[0], self._client_address[1])) + else: + if self._is_local: + recv_buffer_size = BUF_SIZE + else: + recv_buffer_size = self._get_read_size(self._remote_sock, self._recv_buffer_size, False) + data = self._remote_sock.recv(recv_buffer_size) + self._recv_pack_id += 1 + except (OSError, IOError) as e: + if 
eventloop.errno_from_exception(e) in ( + errno.ETIMEDOUT, + errno.EAGAIN, + errno.EWOULDBLOCK, + 10035): # errno.WSAEWOULDBLOCK + return + if not data: + self.destroy() + return + + self._server.speed_tester_d.add(len(data)) + if self._current_user_id != 0 and self._server._config[ + "is_multi_user"] != 0: + self._server.mu_speed_tester_d[ + self._current_user_id].add(len(data)) + + if self._encryptor is not None: + if self._is_local: + try: + obfs_decode = self._obfs.client_decode(data) + except Exception as e: + shell.print_exception(e) + logging.error( + "exception from %s:%d" % + (self._client_address[0], self._client_address[1])) + self.destroy() + return + if obfs_decode[1]: + send_back = self._obfs.client_encode(b'') + self._write_to_sock(send_back, self._remote_sock) + if not self._protocol.obfs.server_info.recv_iv: + iv_len = len(self._protocol.obfs.server_info.iv) + self._protocol.obfs.server_info.recv_iv = obfs_decode[ + 0][:iv_len] + try: + data = self._encryptor.decrypt(obfs_decode[0]) + except Exception as e: + logging.error( + "decrypt data failed, exception from %s:%d" % + (self._client_address[0], self._client_address[1])) + self.destroy() + return + try: + data = self._protocol.client_post_decrypt(data) + if self._recv_pack_id == 1: + self._tcp_mss = self._protocol.get_server_info().tcp_mss + except Exception as e: + shell.print_exception(e) + logging.error( + "exception from %s:%d" % + (self._client_address[0], self._client_address[1])) + self.destroy() + return + else: + if self._encrypt_correct: + data = self._protocol.server_pre_encrypt(data) + data = self._encryptor.encrypt(data) + data = self._obfs.server_encode(data) + if self._encrypt_correct or self._is_relay: + self._server.add_transfer_d(self._current_user_id, len(data)) + self._update_activity(len(data)) + else: + return + try: + self._write_to_sock(data, self._local_sock) + except Exception as e: + shell.print_exception(e) + if self._config['verbose']: + traceback.print_exc() + 
logging.error( + "exception from %s:%d" % + (self._client_address[0], + self._client_address[1])) + self.destroy() + + def _on_local_write(self): + # handle local writable event + if self._data_to_write_to_local: + data = b''.join(self._data_to_write_to_local) + self._data_to_write_to_local = [] + self._write_to_sock(data, self._local_sock) + else: + self._update_stream(STREAM_DOWN, WAIT_STATUS_READING) + + def _on_remote_write(self): + # handle remote writable event + + self._stage = STAGE_STREAM + if self._data_to_write_to_remote: + data = b''.join(self._data_to_write_to_remote) + self._data_to_write_to_remote = [] + self._write_to_sock(data, self._remote_sock) + else: + self._update_stream(STREAM_UP, WAIT_STATUS_READING) + + def _on_local_error(self): + logging.debug('got local error') + if self._local_sock: + logging.error(eventloop.get_sock_error(self._local_sock)) + logging.error( + "exception from %s:%d" % + (self._client_address[0], + self._client_address[1])) + self.destroy() + + def _on_remote_error(self): + logging.debug('got remote error') + if self._remote_sock: + logging.error(eventloop.get_sock_error(self._remote_sock)) + if self._remote_address: + logging.error( + "when connect to %s:%d from %s:%d via port %d" % + (self._remote_address[0], + self._remote_address[1], + self._client_address[0], + self._client_address[1], + self._server._listen_port)) + else: + logging.error( + "exception from %s:%d" % + (self._client_address[0], + self._client_address[1])) + self.destroy() + + def handle_event(self, sock, fd, event): + # handle all events in this handler and dispatch them to methods + handle = False + if self._stage == STAGE_DESTROYED: + logging.debug('ignore handle_event: destroyed') + return True + if fd == self._remote_sock_fd or fd == self._remotev6_sock_fd: + if event & eventloop.POLL_ERR: + handle = True + self._on_remote_error() + elif event & (eventloop.POLL_IN | eventloop.POLL_HUP): + is_exceed = False + if 
self._server.speed_tester_d.isExceed(): + is_exceed = True + if self._current_user_id != 0 and self._server._config[ + "is_multi_user"] != 0: + if self._server.mu_speed_tester_d[ + self._current_user_id].isExceed(): + is_exceed = True + if not is_exceed: + handle = True + self._on_remote_read(sock == self._remote_sock) + else: + self._recv_d_max_size = self._tcp_mss - self._overhead + elif event & eventloop.POLL_OUT: + handle = True + self._on_remote_write() + elif fd == self._local_sock_fd: + if event & eventloop.POLL_ERR: + handle = True + self._on_local_error() + if self._stage == STAGE_DESTROYED: + return True + elif event & (eventloop.POLL_IN | eventloop.POLL_HUP): + is_exceed = False + if self._server.speed_tester_u.isExceed(): + is_exceed = True + if self._current_user_id != 0 and self._server._config[ + "is_multi_user"] != 0: + if self._server.mu_speed_tester_u[ + self._current_user_id].isExceed(): + is_exceed = True + if not is_exceed: + handle = True + self._on_local_read() + else: + self._recv_u_max_size = self._tcp_mss - self._overhead + elif event & eventloop.POLL_OUT: + handle = True + self._on_local_write() + else: + logging.warn('unknown socket from %s:%d' % + (self._client_address[0], self._client_address[1])) + try: + self._loop.removefd(fd) + except Exception as e: + shell.print_exception(e) + try: + del self._fd_to_handlers[fd] + except Exception as e: + shell.print_exception(e) + sock.close() + + return handle + + def _log_error(self, e): + logging.error('%s when handling connection from %s:%d' % + (e, self._client_address[0], self._client_address[1])) + + def stage(self): + return self._stage + + def destroy(self): + # destroy the handler and release any resources + # promises: + # 1. destroy won't make another destroy() call inside + # 2. destroy releases resources so it prevents future call to destroy + # 3. destroy won't raise any exceptions + # if any of the promises are broken, it indicates a bug has been + # introduced! 
most likely memory leaks, etc + if self._stage == STAGE_DESTROYED: + # this couldn't happen + logging.debug('already destroyed') + return + self._stage = STAGE_DESTROYED + if self._remote_address: + logging.debug('destroy: %s:%d' % + self._remote_address) + else: + logging.debug('destroy') + if self._remote_sock: + logging.debug('destroying remote') + try: + self._loop.removefd(self._remote_sock_fd) + except Exception as e: + shell.print_exception(e) + try: + if self._remote_sock_fd is not None: + del self._fd_to_handlers[self._remote_sock_fd] + except Exception as e: + shell.print_exception(e) + self._remote_sock.close() + self._remote_sock = None + if self._remote_sock_v6: + logging.debug('destroying remote_v6') + try: + self._loop.removefd(self._remotev6_sock_fd) + except Exception as e: + shell.print_exception(e) + try: + if self._remotev6_sock_fd is not None: + del self._fd_to_handlers[self._remotev6_sock_fd] + except Exception as e: + shell.print_exception(e) + self._remote_sock_v6.close() + self._remote_sock_v6 = None + if self._local_sock: + logging.debug('destroying local') + try: + self._loop.removefd(self._local_sock_fd) + except Exception as e: + shell.print_exception(e) + try: + if self._local_sock_fd is not None: + del self._fd_to_handlers[self._local_sock_fd] + except Exception as e: + shell.print_exception(e) + self._local_sock.close() + self._local_sock = None + if self._obfs: + self._obfs.dispose() + self._obfs = None + if self._protocol: + self._protocol.dispose() + self._protocol = None + self._encryptor = None + self._dns_resolver.remove_callback(self._handle_dns_resolved) + self._server.remove_handler(self) + if self._add_ref > 0: + self._server.add_connection(-1) + self._server.stat_add(self._client_address[0], -1) + + +class TCPRelay(object): + + def __init__( + self, + config, + dns_resolver, + is_local, + stat_callback=None, + stat_counter=None): + self._config = config + self._is_local = is_local + self._dns_resolver = dns_resolver + 
self._closed = False + self._eventloop = None + self._fd_to_handlers = {} + self.server_transfer_ul = 0 + self.server_transfer_dl = 0 + self.mu_server_transfer_ul = {} + self.mu_server_transfer_dl = {} + self.server_connections = 0 + self.connected_iplist = [] + self.mu_connected_iplist = {} + self.is_cleaning_connected_iplist = False + self.is_cleaning_mu_connected_iplist = False + self.wrong_iplist = {} + self.is_cleaning_wrong_iplist = False + self.detect_log_list = [] + self.mu_detect_log_list = {} + + self.mu_speed_tester_u = {} + self.mu_speed_tester_d = {} + + self.relay_rules = self._config['relay_rules'].copy() + self.is_pushing_relay_rules = False + if 'users_table' in self._config: + self.multi_user_host_table = {} + self.multi_user_table = self._config['users_table'] + if 'node_speedlimit' not in config: + self.mu_bandwidth = 0 + else: + self.mu_bandwidth = float(config['node_speedlimit']) * 128 + + for id in self.multi_user_table: + self.multi_user_host_table[common.get_mu_host( + id, self.multi_user_table[id]['md5'])] = id + + if 'node_speedlimit' not in self.multi_user_table[id]: + bandwidth = max( + float(self.mu_bandwidth), float(0.00)) + else: + if float( + self.mu_bandwidth) > 0.0 or float( + self.multi_user_table[id]['node_speedlimit']) * 128 > 0.0: + bandwidth = max( + float( + self.mu_bandwidth), float( + self.multi_user_table[id]['node_speedlimit']) * 128) + else: + bandwidth = 0 + + self.mu_speed_tester_u[id] = SpeedTester(bandwidth) + self.mu_speed_tester_d[id] = SpeedTester(bandwidth) + + self.is_cleaning_detect_log = False + self.is_cleaning_mu_detect_log_list = False + + self.is_pushing_detect_hex_list = False + self.is_pushing_detect_text_list = False + self.detect_hex_list = self._config['detect_hex_list'].copy() + self.detect_text_list = self._config['detect_text_list'].copy() + + if 'forbidden_ip' in config: + self._forbidden_iplist = IPNetwork(config['forbidden_ip']) + else: + self._forbidden_iplist = None + if 'forbidden_port' in 
config: + self._forbidden_portset = PortRange(config['forbidden_port']) + else: + self._forbidden_portset = None + if 'disconnect_ip' in config: + self._disconnect_ipset = IPNetwork(config['disconnect_ip']) + else: + self._disconnect_ipset = None + + if config["is_multi_user"] != 0: + self.mu_reset_time = {} + for id in self.multi_user_table: + self.mu_reset_time[id] = time.time() + + if self.multi_user_table[id]['forbidden_ip'] is not None: + self.multi_user_table[id]['_forbidden_iplist'] = IPNetwork( + str(self.multi_user_table[id]['forbidden_ip'])) + else: + self.multi_user_table[id][ + '_forbidden_iplist'] = IPNetwork(str("")) + + if self.multi_user_table[id]['disconnect_ip'] is not None: + self.multi_user_table[id]['_disconnect_ipset'] = IPNetwork( + str(self.multi_user_table[id]['disconnect_ip'])) + else: + self.multi_user_table[id]['_disconnect_ipset'] = None + + if self.multi_user_table[id]['forbidden_port'] is not None: + self.multi_user_table[id]['_forbidden_portset'] = PortRange( + str(self.multi_user_table[id]['forbidden_port'])) + else: + self.multi_user_table[id][ + '_forbidden_portset'] = PortRange(str("")) + + if 'node_speedlimit' not in config or 'users_table' in self._config: + self.bandwidth = 0 + else: + self.bandwidth = float(config['node_speedlimit']) * 128 + + self.speed_tester_u = SpeedTester(self.bandwidth) + self.speed_tester_d = SpeedTester(self.bandwidth) + + self.protocol_data = obfs.obfs(config['protocol']).init_data() + self.obfs_data = obfs.obfs(config['obfs']).init_data() + + if config.get('connect_verbose_info', 0) > 0: + common.connect_log = logging.info + + if config.get('connect_hex_data', 0) > 0: + self._connect_hex_data = True + else: + self._connect_hex_data = False + + self._timeout = config['timeout'] + self._timeouts = [] # a list for all the handlers + # we trim the timeouts once a while + self._timeout_offset = 0 # last checked position for timeout + self._handler_to_timeouts = {} # key: handler value: index in timeouts 
+ + if is_local: + listen_addr = config['local_address'] + listen_port = config['local_port'] + else: + listen_addr = config['server'] + listen_port = config['server_port'] + self._listen_port = listen_port + + addrs = socket.getaddrinfo(listen_addr, listen_port, 0, + socket.SOCK_STREAM, socket.SOL_TCP) + if len(addrs) == 0: + raise Exception("can't get addrinfo for %s:%d" % + (listen_addr, listen_port)) + af, socktype, proto, canonname, sa = addrs[0] + server_socket = socket.socket(af, socktype, proto) + server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + server_socket.bind(sa) + server_socket.setblocking(False) + if config['fast_open']: + try: + server_socket.setsockopt(socket.SOL_TCP, 23, 5) + except socket.error: + logging.error('warning: fast open is not available') + self._config['fast_open'] = False + server_socket.listen(config.get('max_connect', 1024)) + self._server_socket = server_socket + self._server_socket_fd = server_socket.fileno() + self._stat_counter = stat_counter + self._stat_callback = stat_callback + + def add_to_loop(self, loop): + if self._eventloop: + raise Exception('already add to loop') + if self._closed: + raise Exception('already closed') + self._eventloop = loop + self._eventloop.add(self._server_socket, + eventloop.POLL_IN | eventloop.POLL_ERR, self) + self._eventloop.add_periodic(self.handle_periodic) + + def remove_handler(self, handler): + index = self._handler_to_timeouts.get(hash(handler), -1) + if index >= 0: + # delete is O(n), so we just set it to None + self._timeouts[index] = None + del self._handler_to_timeouts[hash(handler)] + + def add_connection(self, val): + self.server_connections += val + logging.debug( + 'server port %5d connections = %d' % + (self._listen_port, self.server_connections,)) + + def add_transfer_u(self, user, transfer): + if ((user is None or user == 0) and self._config["is_multi_user"] != 0) or self._config["is_multi_user"] == 0: + self.server_transfer_ul += transfer + else: + if 
user not in self.mu_server_transfer_ul: + self.mu_server_transfer_ul[user] = 0 + self.mu_server_transfer_ul[ + user] += transfer + self.server_transfer_ul + self.server_transfer_ul = 0 + + def add_transfer_d(self, user, transfer): + if ((user is None or user == 0) and self._config["is_multi_user"] != 0) or self._config["is_multi_user"] == 0: + self.server_transfer_dl += transfer + else: + if user not in self.mu_server_transfer_dl: + self.mu_server_transfer_dl[user] = 0 + self.mu_server_transfer_dl[ + user] += transfer + self.server_transfer_dl + self.server_transfer_dl = 0 + + def update_stat(self, port, stat_dict, val): + newval = stat_dict.get(0, 0) + val + stat_dict[0] = newval + logging.debug('port %d connections %d' % (port, newval)) + connections_step = 25 + if newval >= stat_dict.get(-1, 0) + connections_step: + logging.info('port %d connections up to %d' % (port, newval)) + stat_dict[-1] = stat_dict.get(-1, 0) + connections_step + elif newval <= stat_dict.get(-1, 0) - connections_step: + logging.info('port %d connections down to %d' % (port, newval)) + stat_dict[-1] = stat_dict.get(-1, 0) - connections_step + + def stat_add(self, local_addr, val): + if self._stat_counter is not None: + if self._listen_port not in self._stat_counter: + self._stat_counter[self._listen_port] = {} + newval = self._stat_counter[ + self._listen_port].get( + local_addr, 0) + val + logging.debug( + 'port %d addr %s connections %d' % + (self._listen_port, local_addr, newval)) + self._stat_counter[self._listen_port][local_addr] = newval + self.update_stat( + self._listen_port, self._stat_counter[ + self._listen_port], val) + if newval <= 0: + if local_addr in self._stat_counter[self._listen_port]: + del self._stat_counter[self._listen_port][local_addr] + + newval = self._stat_counter.get(0, 0) + val + self._stat_counter[0] = newval + logging.debug('Total connections %d' % newval) + + connections_step = 50 + if newval >= self._stat_counter.get(-1, 0) + connections_step: + 
logging.info('Total connections up to %d' % newval) + self._stat_counter[ + -1] = self._stat_counter.get(-1, 0) + connections_step + elif newval <= self._stat_counter.get(-1, 0) - connections_step: + logging.info('Total connections down to %d' % newval) + self._stat_counter[ + -1] = self._stat_counter.get(-1, 0) - connections_step + + def update_activity(self, handler, data_len): + if data_len and self._stat_callback: + self._stat_callback(self._listen_port, data_len) + + # set handler to active + now = int(time.time()) + if now - handler.last_activity < eventloop.TIMEOUT_PRECISION: + # thus we can lower timeout modification frequency + return + handler.last_activity = now + index = self._handler_to_timeouts.get(hash(handler), -1) + if index >= 0: + # delete is O(n), so we just set it to None + self._timeouts[index] = None + length = len(self._timeouts) + self._timeouts.append(handler) + self._handler_to_timeouts[hash(handler)] = length + + def _sweep_timeout(self): + # tornado's timeout memory management is more flexible than we need + # we just need a sorted last_activity queue and it's faster than heapq + # in fact we can do O(1) insertion/remove so we invent our own + if self._timeouts: + logging.log(shell.VERBOSE_LEVEL, 'sweeping timeouts') + now = time.time() + length = len(self._timeouts) + pos = self._timeout_offset + while pos < length: + handler = self._timeouts[pos] + if handler: + if now - handler.last_activity < self._timeout: + break + else: + if handler.remote_address: + logging.debug('timed out: %s:%d' % + handler.remote_address) + else: + logging.debug('timed out') + handler.destroy() + self._timeouts[pos] = None # free memory + pos += 1 + else: + pos += 1 + if pos > TIMEOUTS_CLEAN_SIZE and pos > length >> 1: + # clean up the timeout queue when it gets larger than half + # of the queue + self._timeouts = self._timeouts[pos:] + for key in self._handler_to_timeouts: + self._handler_to_timeouts[key] -= pos + pos = 0 + self._timeout_offset = pos + + 
def handle_event(self, sock, fd, event): + # handle events and dispatch to handlers + handle = False + if sock: + logging.log(shell.VERBOSE_LEVEL, 'fd %d %s', fd, + eventloop.EVENT_NAMES.get(event, event)) + if sock == self._server_socket: + if event & eventloop.POLL_ERR: + # TODO + raise Exception('server_socket error') + handler = None + handle = True + try: + logging.debug('accept') + conn = self._server_socket.accept() + handler = TCPRelayHandler(self, self._fd_to_handlers, + self._eventloop, conn[0], self._config, + self._dns_resolver, self._is_local) + if handler.stage() == STAGE_DESTROYED: + conn[0].close() + except (OSError, IOError) as e: + error_no = eventloop.errno_from_exception(e) + if error_no in (errno.EAGAIN, errno.EINPROGRESS, + errno.EWOULDBLOCK): + return + else: + shell.print_exception(e) + if self._config['verbose']: + traceback.print_exc() + if handler: + handler.destroy() + else: + if sock: + handler = self._fd_to_handlers.get(fd, None) + if handler: + handle = handler.handle_event(sock, fd, event) + else: + logging.warn('unknown fd') + handle = True + try: + self._eventloop.removefd(fd) + except Exception as e: + shell.print_exception(e) + sock.close() + else: + logging.warn('poll removed fd') + handle = True + if fd in self._fd_to_handlers: + try: + del self._fd_to_handlers[fd] + except Exception as e: + shell.print_exception(e) + return handle + + def handle_periodic(self): + if self._closed: + if self._server_socket: + if self._server_socket_fd: + self._eventloop.removefd(self._server_socket_fd) + self._server_socket_fd = 0 + self._server_socket.close() + self._server_socket = None + logging.info('closed TCP port %d', self._listen_port) + for handler in list(self._fd_to_handlers.values()): + handler.destroy() + self._sweep_timeout() + + def connected_iplist_clean(self): + self.is_cleaning_connected_iplist = True + del self.connected_iplist[:] + self.is_cleaning_connected_iplist = False + + def mu_connected_iplist_clean(self): + 
self.is_cleaning_mu_connected_iplist = True + for id in self.mu_connected_iplist: + del self.mu_connected_iplist[id][:] + self.is_cleaning_mu_connected_iplist = False + + def wrong_iplist_clean(self): + self.is_cleaning_wrong_iplist = True + + temp_new_list = {} + for key in self.wrong_iplist: + if self.wrong_iplist[key] > time.time() - 60: + temp_new_list[key] = self.wrong_iplist[key] + + self.wrong_iplist = temp_new_list.copy() + + self.is_cleaning_wrong_iplist = False + + def detect_log_list_clean(self): + self.is_cleaning_detect_log = True + del self.detect_log_list[:] + self.is_cleaning_detect_log = False + + def push_relay_rules(self, rules): + self.is_pushing_relay_rules = True + self.relay_rules = rules.copy() + self.is_pushing_relay_rules = False + + def mu_detect_log_list_clean(self): + self.is_cleaning_mu_detect_log_list = True + for id in self.mu_detect_log_list: + del self.mu_detect_log_list[id][:] + self.is_cleaning_mu_detect_log_list = False + + def reset_single_multi_user_traffic(self, user_id): + self.mu_reset_time[user_id] = time.time() + if user_id in self.mu_server_transfer_ul: + self.mu_server_transfer_ul[user_id] = 0 + if user_id in self.mu_server_transfer_dl: + self.mu_server_transfer_dl[user_id] = 0 + + def modify_multi_user_table(self, new_table): + self.multi_user_table = new_table.copy() + self.multi_user_host_table = {} + + for id in self.multi_user_table: + if id not in self.mu_reset_time: + self.mu_reset_time[id] = time.time() + + self.multi_user_host_table[common.get_mu_host( + id, self.multi_user_table[id]['md5'])] = id + if self.multi_user_table[id]['forbidden_ip'] is not None: + self.multi_user_table[id]['_forbidden_iplist'] = IPNetwork( + str(self.multi_user_table[id]['forbidden_ip'])) + else: + self.multi_user_table[id][ + '_forbidden_iplist'] = IPNetwork(str("")) + if self.multi_user_table[id]['disconnect_ip'] is not None: + self.multi_user_table[id]['_disconnect_ipset'] = IPNetwork( + 
str(self.multi_user_table[id]['disconnect_ip'])) + else: + self.multi_user_table[id]['_disconnect_ipset'] = None + if self.multi_user_table[id]['forbidden_port'] is not None: + self.multi_user_table[id]['_forbidden_portset'] = PortRange( + str(self.multi_user_table[id]['forbidden_port'])) + else: + self.multi_user_table[id][ + '_forbidden_portset'] = PortRange(str("")) + + if 'node_speedlimit' not in self.multi_user_table[id]: + bandwidth = max( + float(self.mu_bandwidth), float(0.00)) + else: + if float( + self.mu_bandwidth) > 0.0 or float( + self.multi_user_table[id]['node_speedlimit']) * 128 > 0.0: + bandwidth = max( + float( + self.mu_bandwidth), float( + self.multi_user_table[id]['node_speedlimit']) * 128) + else: + bandwidth = 0 + + self.mu_speed_tester_u[id] = SpeedTester(bandwidth) + self.mu_speed_tester_d[id] = SpeedTester(bandwidth) + + def modify_detect_text_list(self, new_list): + self.is_pushing_detect_text_list = True + self.detect_text_list = new_list.copy() + self.is_pushing_detect_text_list = False + + def modify_detect_hex_list(self, new_list): + self.is_pushing_detect_hex_list = True + self.detect_hex_list = new_list.copy() + self.is_pushing_detect_hex_list = False + + def close(self, next_tick=False): + logging.debug('TCP close') + self._closed = True + if not next_tick: + if self._eventloop: + self._eventloop.remove_periodic(self.handle_periodic) + if self._server_socket_fd: + self._eventloop.removefd(self._server_socket_fd) + self._server_socket_fd = 0 + self._server_socket.close() + for handler in list(self._fd_to_handlers.values()): + handler.destroy() diff --git a/shadowsocks/udprelay.py b/shadowsocks/udprelay.py new file mode 100644 index 0000000..1d061f3 --- /dev/null +++ b/shadowsocks/udprelay.py @@ -0,0 +1,1050 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright 2015 clowwindy +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# SOCKS5 UDP Request +# +----+------+------+----------+----------+----------+ +# |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA | +# +----+------+------+----------+----------+----------+ +# | 2 | 1 | 1 | Variable | 2 | Variable | +# +----+------+------+----------+----------+----------+ + +# SOCKS5 UDP Response +# +----+------+------+----------+----------+----------+ +# |RSV | FRAG | ATYP | DST.ADDR | DST.PORT | DATA | +# +----+------+------+----------+----------+----------+ +# | 2 | 1 | 1 | Variable | 2 | Variable | +# +----+------+------+----------+----------+----------+ + +# shadowsocks UDP Request (before encrypted) +# +------+----------+----------+----------+ +# | ATYP | DST.ADDR | DST.PORT | DATA | +# +------+----------+----------+----------+ +# | 1 | Variable | 2 | Variable | +# +------+----------+----------+----------+ + +# shadowsocks UDP Response (before encrypted) +# +------+----------+----------+----------+ +# | ATYP | DST.ADDR | DST.PORT | DATA | +# +------+----------+----------+----------+ +# | 1 | Variable | 2 | Variable | +# +------+----------+----------+----------+ + +# shadowsocks UDP Request and Response (after encrypted) +# +-------+--------------+ +# | IV | PAYLOAD | +# +-------+--------------+ +# | Fixed | Variable | +# +-------+--------------+ + +# HOW TO NAME THINGS +# ------------------ +# `dest` means destination server, which is from DST fields in the SOCKS5 +# request +# `local` means local server of shadowsocks +# `remote` means remote server of shadowsocks +# `client` means UDP clients that connects to other servers +# 
`server` means the UDP server that handles user requests + +from __future__ import absolute_import, division, print_function, \ + with_statement + +import time +import socket +import logging +import struct +import errno +import random +import binascii +import traceback + +from shadowsocks import encrypt, obfs, eventloop, lru_cache, common, shell +from shadowsocks.common import pre_parse_header, parse_header, pack_addr, IPNetwork, PortRange + +# for each handler, we have 2 stream directions: +# upstream: from client to server direction +# read local and write to remote +# downstream: from server to client direction +# read remote and write to local + +STREAM_UP = 0 +STREAM_DOWN = 1 + +# for each stream, it's waiting for reading, or writing, or both +WAIT_STATUS_INIT = 0 +WAIT_STATUS_READING = 1 +WAIT_STATUS_WRITING = 2 +WAIT_STATUS_READWRITING = WAIT_STATUS_READING | WAIT_STATUS_WRITING + +BUF_SIZE = 65536 +DOUBLE_SEND_BEG_IDS = 16 +POST_MTU_MIN = 500 +POST_MTU_MAX = 1400 +SENDING_WINDOW_SIZE = 8192 + +STAGE_INIT = 0 +STAGE_RSP_ID = 1 +STAGE_DNS = 2 +STAGE_CONNECTING = 3 +STAGE_STREAM = 4 +STAGE_DESTROYED = -1 + +CMD_CONNECT = 0 +CMD_RSP_CONNECT = 1 +CMD_CONNECT_REMOTE = 2 +CMD_RSP_CONNECT_REMOTE = 3 +CMD_POST = 4 +CMD_SYN_STATUS = 5 +CMD_POST_64 = 6 +CMD_SYN_STATUS_64 = 7 +CMD_DISCONNECT = 8 + +CMD_VER_STR = b"\x08" + +RSP_STATE_EMPTY = b"" +RSP_STATE_REJECT = b"\x00" +RSP_STATE_CONNECTED = b"\x01" +RSP_STATE_CONNECTEDREMOTE = b"\x02" +RSP_STATE_ERROR = b"\x03" +RSP_STATE_DISCONNECT = b"\x04" +RSP_STATE_REDIRECT = b"\x05" + +def client_key(source_addr, server_af): + # notice this is server af, not dest af + return '%s:%s:%d' % (source_addr[0], source_addr[1], server_af) + +class UDPRelay(object): + + def __init__( + self, + config, + dns_resolver, + is_local, + stat_callback=None, + stat_counter=None): + self._config = config + if config.get('connect_verbose_info', 0) > 0: + common.connect_log = logging.info + + if config.get('connect_hex_data', 0) > 0: + 
self._connect_hex_data = True + else: + self._connect_hex_data = False + + if is_local: + self._listen_addr = config['local_address'] + self._listen_port = config['local_port'] + self._remote_addr = config['server'] + self._remote_port = config['server_port'] + else: + self._listen_addr = config['server'] + self._listen_port = config['server_port'] + self._remote_addr = None + self._remote_port = None + self._dns_resolver = dns_resolver + self._password = common.to_bytes(config['password']) + self._method = config['method'] + self._timeout = config['timeout'] + self._is_local = is_local + self._udp_cache_size = config['udp_cache'] + self._cache = lru_cache.LRUCache( + timeout=config['udp_timeout'], + close_callback=self._close_client_pair) + self._cache_dns_client = lru_cache.LRUCache( + timeout=10, close_callback=self._close_client_pair) + self._client_fd_to_server_addr = {} + #self._dns_cache = lru_cache.LRUCache(timeout=1800) + self._eventloop = None + self._closed = False + self.server_transfer_ul = 0 + self.server_transfer_dl = 0 + + self.connected_iplist = [] + self.wrong_iplist = {} + self.detect_log_list = [] + + self.is_cleaning_connected_iplist = False + self.is_cleaning_wrong_iplist = False + self.is_cleaning_detect_log = False + self.is_cleaning_mu_detect_log_list = False + self.is_cleaning_mu_connected_iplist = False + + if 'users_table' in self._config: + self.multi_user_table = self._config['users_table'] + + self.mu_server_transfer_ul = {} + self.mu_server_transfer_dl = {} + self.mu_connected_iplist = {} + self.mu_detect_log_list = {} + + self.is_pushing_detect_hex_list = False + self.is_pushing_detect_text_list = False + self.detect_hex_list = self._config['detect_hex_list'].copy() + self.detect_text_list = self._config['detect_text_list'].copy() + + self.protocol_data = obfs.obfs(config['protocol']).init_data() + self._protocol = obfs.obfs(config['protocol']) + server_info = obfs.server_info(self.protocol_data) + server_info.host = 
self._listen_addr + server_info.port = self._listen_port + if 'users_table' in self._config: + server_info.users = self.multi_user_table + else: + server_info.users = {} + server_info.is_multi_user = config["is_multi_user"] + server_info.protocol_param = config['protocol_param'] + server_info.obfs_param = '' + server_info.iv = b'' + server_info.recv_iv = b'' + server_info.key_str = common.to_bytes(config['password']) + try: + server_info.key = encrypt.encrypt_key(self._password, self._method) + except Exception: + logging.error("UDP: method not support") + server_info.key = b'' + server_info.head_len = 30 + server_info.tcp_mss = 1452 + server_info.buffer_size = BUF_SIZE + server_info.overhead = 0 + self._protocol.set_server_info(server_info) + + self._sockets = set() + self._fd_to_handlers = {} + self._reqid_to_hd = {} + self._data_to_write_to_server_socket = [] + + self._timeouts = [] # a list for all the handlers + # we trim the timeouts once a while + self._timeout_offset = 0 # last checked position for timeout + self._handler_to_timeouts = {} # key: handler value: index in timeouts + + self._bind = config.get('out_bind', '') + self._bindv6 = config.get('out_bindv6', '') + self._ignore_bind_list = config.get('ignore_bind', []) + + if 'forbidden_ip' in config: + self._forbidden_iplist = IPNetwork(config['forbidden_ip']) + else: + self._forbidden_iplist = None + if 'forbidden_port' in config: + self._forbidden_portset = PortRange(config['forbidden_port']) + else: + self._forbidden_portset = None + if 'disconnect_ip' in config: + self._disconnect_ipset = IPNetwork(config['disconnect_ip']) + else: + self._disconnect_ipset = None + + self._relay_rules = self._config['relay_rules'].copy() + self._is_pushing_relay_rules = False + + addrs = socket.getaddrinfo(self._listen_addr, self._listen_port, 0, + socket.SOCK_DGRAM, socket.SOL_UDP) + if len(addrs) == 0: + raise Exception("can't get addrinfo for %s:%d" % + (self._listen_addr, self._listen_port)) + af, socktype, 
proto, canonname, sa = addrs[0] + server_socket = socket.socket(af, socktype, proto) + server_socket.bind((self._listen_addr, self._listen_port)) + server_socket.setblocking(False) + server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1024 * 1024) + server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1024 * 1024) + self._server_socket = server_socket + self._stat_callback = stat_callback + + def _get_a_server(self): + server = self._config['server'] + server_port = self._config['server_port'] + if isinstance(server_port, list): + server_port = random.choice(server_port) + if isinstance(server, list): + server = random.choice(server) + logging.debug('chosen server: %s:%d', server, server_port) + return server, server_port + + def add_transfer_u(self, user, transfer): + if ((user is None or user == 0) and self._config["is_multi_user"] != 0) or self._config["is_multi_user"] == 0: + self.server_transfer_ul += transfer + else: + if user not in self.mu_server_transfer_ul: + self.mu_server_transfer_ul[user] = 0 + self.mu_server_transfer_ul[ + user] += transfer + self.server_transfer_ul + self.server_transfer_ul = 0 + + def add_transfer_d(self, user, transfer): + if ((user is None or user == 0) and self._config["is_multi_user"] != 0) or self._config["is_multi_user"] == 0: + self.server_transfer_dl += transfer + else: + if user not in self.mu_server_transfer_dl: + self.mu_server_transfer_dl[user] = 0 + self.mu_server_transfer_dl[ + user] += transfer + self.server_transfer_dl + self.server_transfer_dl = 0 + + def _close_client_pair(self, client_pair): + client, uid = client_pair + self._close_client(client) + + def _close_client(self, client): + if hasattr(client, 'close'): + if not self._is_local: + if client.fileno() in self._client_fd_to_server_addr: + logging.debug( + 'close_client: %s' % + (self._client_fd_to_server_addr[ + client.fileno()],)) + else: + client.info('close_client') + self._sockets.remove(client.fileno()) + 
self._eventloop.remove(client) + del self._client_fd_to_server_addr[client.fileno()] + client.close() + else: + # just an address + client.info('close_client pass %s' % client) + pass + + def _pre_parse_udp_header(self, data): + if data is None: + return + datatype = common.ord(data[0]) + if datatype == 0x8: + if len(data) >= 8: + crc = binascii.crc32(data) & 0xffffffff + if crc != 0xffffffff: + logging.warn('uncorrect CRC32, maybe wrong password or ' + 'encryption method') + return None + cmd = common.ord(data[1]) + request_id = struct.unpack('>H', data[2:4])[0] + data = data[4:-4] + return (cmd, request_id, data) + elif len(data) >= 6 and common.ord(data[1]) == 0x0: + crc = binascii.crc32(data) & 0xffffffff + if crc != 0xffffffff: + logging.warn('uncorrect CRC32, maybe wrong password or ' + 'encryption method') + return None + cmd = common.ord(data[1]) + data = data[2:-4] + return (cmd, 0, data) + else: + logging.warn('header too short, maybe wrong password or ' + 'encryption method') + return None + return data + + def _pack_rsp_data(self, cmd, request_id, data): + _rand_data = b"123456789abcdefghijklmnopqrstuvwxyz" * 2 + reqid_str = struct.pack(">H", request_id) + return b''.join([CMD_VER_STR, common.chr(cmd), reqid_str, data, _rand_data[ + :random.randint(0, len(_rand_data))], reqid_str]) + + def _handel_protocol_error(self, client_address, ogn_data): + #raise Exception('can not parse header') + logging.warn( + "Protocol ERROR, UDP ogn data %s from %s:%d" % + (binascii.hexlify(ogn_data), client_address[0], client_address[1])) + if client_address[0] not in self.wrong_iplist and client_address[ + 0] != 0 and self.is_cleaning_wrong_iplist == False: + self.wrong_iplist[client_address[0]] = time.time() + + def _get_relay_host(self, client_address, ogn_data): + for id in self._relay_rules: + if self._relay_rules[id]['port'] == 0: + port = self._listen_port + else: + port = self._relay_rules[id]['port'] + return (self._relay_rules[id]['dist_ip'], int(port)) + return 
(None, None) + + def _handel_normal_relay(self, client_address, ogn_data): + host, port = self._get_relay_host(client_address, ogn_data) + self._encrypt_correct = False + if port is None: + raise Exception('can not parse header') + data = b"\x03" + common.to_bytes(common.chr(len(host))) + \ + common.to_bytes(host) + struct.pack('>H', port) + return (data + ogn_data, True) + + def _get_mu_relay_host(self, ogn_data, uid): + + if not uid: + return (None, None) + + for id in self._relay_rules: + if (self._relay_rules[id]['user_id'] == 0 and uid != + 0) or self._relay_rules[id]['user_id'] == uid: + has_higher_priority = False + for priority_id in self._relay_rules: + if ( + ( + self._relay_rules[priority_id]['priority'] > self._relay_rules[id]['priority'] and self._relay_rules[id]['id'] != self._relay_rules[priority_id]['id']) or ( + self._relay_rules[priority_id]['priority'] == self._relay_rules[id]['priority'] and self._relay_rules[id]['id'] > self._relay_rules[priority_id]['id'])) and ( + self._relay_rules[priority_id]['user_id'] == uid or self._relay_rules[priority_id]['user_id'] == 0): + has_higher_priority = True + continue + + if has_higher_priority: + continue + + if self._relay_rules[id]['dist_ip'] == '0.0.0.0': + continue + + if self._relay_rules[id]['port'] == 0: + port = self._listen_port + else: + port = self._relay_rules[id]['port'] + + return (self._relay_rules[id]['dist_ip'], int(port)) + return (None, None) + + def _handel_mu_relay(self, client_address, ogn_data, uid): + host, port = self._get_mu_relay_host(ogn_data, uid) + if host is None: + return (ogn_data, False) + self._encrypt_correct = False + if port is None: + raise Exception('can not parse header') + data = b"\x03" + common.to_bytes(common.chr(len(host))) + \ + common.to_bytes(host) + struct.pack('>H', port) + return (data + ogn_data, True) + + def _is_relay(self, client_address, ogn_data, uid): + if self._config['is_multi_user'] == 0: + if self._get_relay_host(client_address, ogn_data) == 
(None, None): + return False + else: + if self._get_mu_relay_host(ogn_data, uid) == (None, None): + return False + return True + + def _socket_bind_addr(self, sock, af, is_relay): + bind_addr = '' + if self._bind and af == socket.AF_INET: + bind_addr = self._bind + elif self._bindv6 and af == socket.AF_INET6: + bind_addr = self._bindv6 + + bind_addr = bind_addr.replace("::ffff:", "") + if bind_addr in self._ignore_bind_list: + bind_addr = None + + if is_relay: + bind_addr = None + + if bind_addr: + local_addrs = socket.getaddrinfo( + bind_addr, 0, 0, socket.SOCK_STREAM, socket.SOL_TCP) + if local_addrs[0][0] == af: + logging.debug("bind %s" % (bind_addr,)) + sock.bind((bind_addr, 0)) + + def _handle_server(self): + server = self._server_socket + data, r_addr = server.recvfrom(BUF_SIZE) + ogn_data = data + if not data: + logging.debug('UDP handle_server: data is empty') + if self._stat_callback: + self._stat_callback(self._listen_port, len(data)) + uid = None + if self._is_local: + frag = common.ord(data[2]) + if frag != 0: + logging.warn('drop a message since frag is not 0') + return + else: + data = data[3:] + else: + try: + data, key, ref_iv = encrypt.decrypt_all(self._password, + self._method, + data) + except Exception: + logging.debug('UDP handle_server: decrypt data failed') + return + + # decrypt data + if not data: + logging.debug('UDP handle_server: data is empty after decrypt') + return + ref_iv = [0] + self._protocol.obfs.server_info.recv_iv = ref_iv[0] + data, uid = self._protocol.server_udp_post_decrypt(data) + + if self._config['is_multi_user'] != 0 and data: + if uid: + if uid not in self.mu_server_transfer_ul: + self.mu_server_transfer_ul[uid] = 0 + if uid not in self.mu_server_transfer_dl: + self.mu_server_transfer_dl[uid] = 0 + if uid not in self.mu_connected_iplist: + self.mu_connected_iplist[uid] = [] + if uid not in self.mu_detect_log_list: + self.mu_detect_log_list[uid] = [] + + if common.getRealIp(r_addr[0]) not in 
self.mu_connected_iplist[uid]: + self.mu_connected_iplist[uid].append(common.getRealIp(r_addr[0])) + + else: + raise Exception( + 'This port is multi user in single port only,so The connection has been rejected, when connect from %s:%d via port %d' % + (r_addr[0], r_addr[1], self._listen_port)) + + is_relay = False + + #logging.info("UDP data %s" % (binascii.hexlify(data),)) + if not self._is_local: + + if not self._is_relay(r_addr, ogn_data, uid): + data = pre_parse_header(data) + + data = self._pre_parse_udp_header(data) + if data is None: + return + + if isinstance(data, tuple): + return + # return self._handle_tcp_over_udp(data, r_addr) + else: + if self._config["is_multi_user"] == 0: + data, is_relay = self._handel_normal_relay(r_addr, ogn_data) + else: + data, is_relay = self._handel_mu_relay(r_addr, ogn_data, uid) + + try: + header_result = parse_header(data) + except: + self._handel_protocol_error(r_addr, ogn_data) + return + + if header_result is None: + self._handel_protocol_error(r_addr, ogn_data) + return + connecttype, addrtype, dest_addr, dest_port, header_length = header_result + + if self._is_local: + addrtype = 3 + server_addr, server_port = self._get_a_server() + else: + server_addr, server_port = dest_addr, dest_port + + if (addrtype & 7) == 3: + af = common.is_ip(server_addr) + if af == False: + handler = common.UDPAsyncDNSHandler((data, r_addr, uid, header_length, is_relay)) + handler.resolve(self._dns_resolver, (server_addr, server_port), self._handle_server_dns_resolved) + else: + self._handle_server_dns_resolved("", (server_addr, server_port), server_addr, (data, r_addr, uid, header_length, is_relay)) + else: + self._handle_server_dns_resolved("", (server_addr, server_port), server_addr, (data, r_addr, uid, header_length, is_relay)) + + def _handle_server_dns_resolved(self, error, remote_addr, server_addr, params): + if error: + return + data, r_addr, uid, header_length, is_relay = params + if uid is None: + is_mu = False + user_id = 
self._listen_port + else: + is_mu = True + user_id = uid + try: + server_port = remote_addr[1] + addrs = socket.getaddrinfo(server_addr, server_port, 0, + socket.SOCK_DGRAM, socket.SOL_UDP) + if not addrs: # drop + return + af, socktype, proto, canonname, sa = addrs[0] + server_addr = sa[0] + key = client_key(r_addr, af) + client_pair = self._cache.get(key, None) + if client_pair is None: + client_pair = self._cache_dns_client.get(key, None) + if client_pair is None: + if self._forbidden_iplist: + if common.to_str(sa[0]) in self._forbidden_iplist: + logging.debug('IP %s is in forbidden list, drop' % common.to_str(sa[0])) + # drop + return + if self._disconnect_ipset: + if common.to_str(sa[0]) in self._disconnect_ipset: + logging.debug('IP %s is in disconnect list, drop' % common.to_str(sa[0])) + # drop + return + if self._forbidden_portset: + if sa[1] in self._forbidden_portset: + logging.debug('Port %d is in forbidden list, reject' % sa[1]) + # drop + return + + if is_mu: + if self.multi_user_table[uid]['_forbidden_iplist']: + if common.to_str(sa[0]) in self.multi_user_table[uid]['_forbidden_iplist']: + logging.debug('IP %s is in forbidden list, drop' % common.to_str(sa[0])) + # drop + return + if self.multi_user_table[uid]['_disconnect_ipset']: + if common.to_str(sa[0]) in self.multi_user_table[uid]['_disconnect_ipset']: + logging.debug('IP %s is in disconnect list, drop' % common.to_str(sa[0])) + # drop + return + if self.multi_user_table[uid]['_forbidden_portset']: + if sa[1] in self.multi_user_table[uid]['_forbidden_portset']: + logging.debug('Port %d is in forbidden list, reject' % sa[1]) + # drop + return + + client = socket.socket(af, socktype, proto) + client_uid = uid + client.setblocking(False) + self._socket_bind_addr(client, af, is_relay) + is_dns = False + if len(data) > header_length + 13 and data[header_length + 4 : header_length + 12] == b"\x00\x01\x00\x00\x00\x00\x00\x00": + is_dns = True + else: + pass + if sa[1] == 53 and is_dns: #DNS + 
logging.debug("DNS query %s from %s:%d" % (common.to_str(sa[0]), r_addr[0], r_addr[1])) + self._cache_dns_client[key] = (client, uid) + else: + self._cache[key] = (client, uid) + self._client_fd_to_server_addr[client.fileno()] = (r_addr, af) + + self._sockets.add(client.fileno()) + self._eventloop.add(client, eventloop.POLL_IN, self) + + logging.debug('UDP port %5d sockets %d' % (self._listen_port, len(self._sockets))) + + if not self.is_pushing_detect_text_list: + for id in self.detect_text_list: + if common.match_regex( + self.detect_text_list[id]['regex'], + str(data)): + if self._config['is_multi_user'] != 0 and uid != 0: + if self.is_cleaning_mu_detect_log_list == False and id not in self.mu_detect_log_list[ + uid]: + self.mu_detect_log_list[uid].append(id) + else: + if self.is_cleaning_detect_log == False and id not in self.detect_log_list: + self.detect_log_list.append(id) + raise Exception( + 'This connection match the regex: id:%d was reject,regex: %s ,connecting %s:%d from %s:%d via port %d' % + (self.detect_text_list[id]['id'], + self.detect_text_list[id]['regex'], + common.to_str(server_addr), + server_port, + r_addr[0], + r_addr[1], + self._listen_port)) + if not self.is_pushing_detect_hex_list: + for id in self.detect_hex_list: + if common.match_regex( + self.detect_hex_list[id]['regex'], + binascii.hexlify(data)): + if self._config['is_multi_user'] != 0 and uid != 0: + if self.is_cleaning_mu_detect_log_list == False and id not in self.mu_detect_log_list[ + uid]: + self.mu_detect_log_list[uid].append(id) + else: + if self.is_cleaning_detect_log == False and id not in self.detect_log_list: + self.detect_log_list.append(id) + raise Exception( + 'This connection match the regex: id:%d was reject,regex: %s ,connecting %s:%d from %s:%d via port %d' % + (self.detect_hex_list[id]['id'], + self.detect_hex_list[id]['regex'], + common.to_str(server_addr), + server_port, + r_addr[0], + r_addr[1], + self._listen_port)) + if not self._connect_hex_data: + 
common.connect_log('UDP data to %s:%d from %s:%d via port %d' % + (common.to_str(server_addr), server_port, + r_addr[0], r_addr[1], self._listen_port)) + else: + common.connect_log( + 'UDP data to %s:%d from %s:%d via port %d,hex data : %s' % + (common.to_str(server_addr), + server_port, + r_addr[0], + r_addr[1], + self._listen_port, + binascii.hexlify(data))) + if self._config['is_multi_user'] != 2: + if common.to_str(r_addr[0]) in self.wrong_iplist and r_addr[ + 0] != 0 and self.is_cleaning_wrong_iplist == False: + del self.wrong_iplist[common.to_str(r_addr[0])] + if common.getRealIp(r_addr[0]) not in self.connected_iplist and r_addr[ + 0] != 0 and self.is_cleaning_connected_iplist == False: + self.connected_iplist.append(common.getRealIp(r_addr[0])) + else: + client, client_uid = client_pair + self._cache.clear(self._udp_cache_size) + self._cache_dns_client.clear(16) + + if self._is_local: + try: + key, ref_iv, m = encrypt.gen_key_iv(self._password, self._method) + self._protocol.obfs.server_info.iv = ref_iv[0] + data = self._protocol.client_udp_pre_encrypt(data) + #logging.debug("%s" % (binascii.hexlify(data),)) + data = encrypt.encrypt_all_m(key, ref_iv, m, self._method, data) + except Exception: + logging.debug("UDP handle_server: encrypt data failed") + return + if not data: + return + else: + data = data[header_length:] + if not data: + return + except Exception as e: + shell.print_exception(e) + if self._config['verbose']: + traceback.print_exc() + logging.error("exception from user %d" % (user_id,)) + + try: + client.sendto(data, (server_addr, server_port)) + self.add_transfer_u(client_uid, len(data)) + if client_pair is None: # new request + addr, port = client.getsockname()[:2] + common.connect_log('UDP data to %s(%s):%d from %s:%d by user %d' % + (common.to_str(remote_addr[0]), common.to_str(server_addr), server_port, addr, port, user_id)) + except IOError as e: + err = eventloop.errno_from_exception(e) + logging.warning('IOError sendto %s:%d by user 
%d' % (server_addr, server_port, user_id)) + if err in (errno.EINPROGRESS, errno.EAGAIN): + pass + else: + shell.print_exception(e) + + def _handle_client(self, sock): + data, r_addr = sock.recvfrom(BUF_SIZE) + if not data: + logging.debug('UDP handle_client: data is empty') + return + if self._stat_callback: + self._stat_callback(self._listen_port, len(data)) + + client_addr = self._client_fd_to_server_addr.get(sock.fileno()) + client_uid = None + if client_addr: + key = client_key(client_addr[0], client_addr[1]) + client_pair = self._cache.get(key, None) + client_dns_pair = self._cache_dns_client.get(key, None) + if client_pair: + client, client_uid = client_pair + elif client_dns_pair: + client, client_uid = client_dns_pair + + if not self._is_local: + addrlen = len(r_addr[0]) + if addrlen > 255: + # drop + return + + origin_data = data[:] + + data = pack_addr(r_addr[0]) + struct.pack('>H', r_addr[1]) + data + try: + ref_iv = [encrypt.encrypt_new_iv(self._method)] + self._protocol.obfs.server_info.iv = ref_iv[0] + data = self._protocol.server_udp_pre_encrypt(data, client_uid) + response = encrypt.encrypt_all(self._password, + self._method, data) + except Exception: + logging.debug("UDP handle_client: encrypt data failed") + return + if not response: + return + else: + try: + data, key, ref_iv = encrypt.decrypt_all(self._password, + self._method, data) + except Exception: + logging.debug('UDP handle_client: decrypt data failed') + return + if not data: + return + self._protocol.obfs.server_info.recv_iv = ref_iv[0] + data = self._protocol.client_udp_post_decrypt(data) + header_result = parse_header(data) + if header_result is None: + return + #connecttype, dest_addr, dest_port, header_length = header_result + #logging.debug('UDP handle_client %s:%d to %s:%d' % (common.to_str(r_addr[0]), r_addr[1], dest_addr, dest_port)) + + response = b'\x00\x00\x00' + data + + if client_addr: + if client_uid: + self.add_transfer_d(client_uid, len(response)) + else: + 
self.server_transfer_dl += len(response) + + if self._is_relay(r_addr, origin_data, client_uid): + response = origin_data + + self.write_to_server_socket(response, client_addr[0]) + if client_dns_pair: + logging.debug( + "remove dns client %s:%d" % + (client_addr[0][0], client_addr[0][1])) + del self._cache_dns_client[key] + self._close_client(client_dns_pair[0]) + else: + # this packet is from somewhere else we know + # simply drop that packet + pass + + def write_to_server_socket(self, data, addr): + uncomplete = False + retry = 0 + try: + self._server_socket.sendto(data, addr) + data = None + while self._data_to_write_to_server_socket: + data_buf = self._data_to_write_to_server_socket[0] + retry = data_buf[1] + 1 + del self._data_to_write_to_server_socket[0] + data, addr = data_buf[0] + self._server_socket.sendto(data, addr) + except (OSError, IOError) as e: + error_no = eventloop.errno_from_exception(e) + uncomplete = True + if error_no in (errno.EWOULDBLOCK,): + pass + else: + shell.print_exception(e) + return False + # if uncomplete and data is not None and retry < 3: + # self._data_to_write_to_server_socket.append([(data, addr), retry]) + #''' + + def add_to_loop(self, loop): + if self._eventloop: + raise Exception('already add to loop') + if self._closed: + raise Exception('already closed') + self._eventloop = loop + + server_socket = self._server_socket + self._eventloop.add(server_socket, + eventloop.POLL_IN | eventloop.POLL_ERR, self) + loop.add_periodic(self.handle_periodic) + + def remove_handler(self, handler): + index = self._handler_to_timeouts.get(hash(handler), -1) + if index >= 0: + # delete is O(n), so we just set it to None + self._timeouts[index] = None + del self._handler_to_timeouts[hash(handler)] + + def update_activity(self, handler): + # set handler to active + now = int(time.time()) + if now - handler.last_activity < eventloop.TIMEOUT_PRECISION: + # thus we can lower timeout modification frequency + return + handler.last_activity = now + 
index = self._handler_to_timeouts.get(hash(handler), -1) + if index >= 0: + # delete is O(n), so we just set it to None + self._timeouts[index] = None + length = len(self._timeouts) + self._timeouts.append(handler) + self._handler_to_timeouts[hash(handler)] = length + + def _sweep_timeout(self): + # tornado's timeout memory management is more flexible than we need + # we just need a sorted last_activity queue and it's faster than heapq + # in fact we can do O(1) insertion/remove so we invent our own + if self._timeouts: + logging.log(shell.VERBOSE_LEVEL, 'sweeping timeouts') + now = time.time() + length = len(self._timeouts) + pos = self._timeout_offset + while pos < length: + handler = self._timeouts[pos] + if handler: + if now - handler.last_activity < self._timeout: + break + else: + if handler.remote_address: + logging.debug('timed out: %s:%d' % + handler.remote_address) + else: + logging.debug('timed out') + handler.destroy() + handler.destroy_local() + self._timeouts[pos] = None # free memory + pos += 1 + else: + pos += 1 + if pos > TIMEOUTS_CLEAN_SIZE and pos > length >> 1: + # clean up the timeout queue when it gets larger than half + # of the queue + self._timeouts = self._timeouts[pos:] + for key in self._handler_to_timeouts: + self._handler_to_timeouts[key] -= pos + pos = 0 + self._timeout_offset = pos + + def handle_event(self, sock, fd, event): + if sock == self._server_socket: + if event & eventloop.POLL_ERR: + logging.error('UDP server_socket err') + try: + self._handle_server() + except Exception as e: + shell.print_exception(e) + if self._config['verbose']: + traceback.print_exc() + elif sock and (fd in self._sockets): + if event & eventloop.POLL_ERR: + logging.error('UDP client_socket err') + try: + self._handle_client(sock) + except Exception as e: + shell.print_exception(e) + if self._config['verbose']: + traceback.print_exc() + else: + if sock: + handler = self._fd_to_handlers.get(fd, None) + if handler: + handler.handle_event(sock, event) + 
else: + logging.warn('poll removed fd') + + def handle_periodic(self): + if self._closed: + self._cache.clear(0) + self._cache_dns_client.clear(0) + if self._eventloop: + self._eventloop.remove_periodic(self.handle_periodic) + self._eventloop.remove(self._server_socket) + if self._server_socket: + self._server_socket.close() + self._server_socket = None + logging.info('closed UDP port %d', self._listen_port) + else: + before_sweep_size = len(self._sockets) + self._cache.sweep() + self._cache_dns_client.sweep() + if before_sweep_size != len(self._sockets): + logging.debug( + 'UDP port %5d sockets %d' % + (self._listen_port, len( + self._sockets))) + self._sweep_timeout() + + def connected_iplist_clean(self): + self.is_cleaninglist = True + del self.connected_iplist[:] + self.is_cleaning_connected_iplist = False + + def mu_connected_iplist_clean(self): + self.is_cleaning_mu_connected_iplist = True + for id in self.mu_connected_iplist: + del self.mu_connected_iplist[id][:] + self.is_cleaning_mu_connected_iplist = False + + def wrong_iplist_clean(self): + self.is_cleaning_wrong_iplist = True + + temp_new_list = {} + for key in self.wrong_iplist: + if self.wrong_iplist[key] > time.time() - 60: + temp_new_list[key] = self.wrong_iplist[key] + + self.wrong_iplist = temp_new_list.copy() + + self.is_cleaning_wrong_iplist = True + + def detect_log_list_clean(self): + self.is_cleaning_detect_log = True + del self.detect_log_list[:] + self.is_cleaning_detect_log = False + + def mu_detect_log_list_clean(self): + self.is_cleaning_mu_detect_log_list = True + for id in self.mu_detect_log_list: + del self.mu_detect_log_list[id][:] + self.is_cleaning_mu_detect_log_list = False + + def reset_single_multi_user_traffic(self, user_id): + if user_id in self.mu_server_transfer_ul: + self.mu_server_transfer_ul[user_id] = 0 + if user_id in self.mu_server_transfer_dl: + self.mu_server_transfer_dl[user_id] = 0 + + def modify_detect_text_list(self, new_list): + self.is_pushing_detect_text_list 
= True + self.detect_text_list = new_list.copy() + self.is_pushing_detect_text_list = False + + def modify_detect_hex_list(self, new_list): + self.is_pushing_detect_hex_list = True + self.detect_hex_list = new_list.copy() + self.is_pushing_detect_hex_list = False + + def modify_multi_user_table(self, new_table): + self.multi_user_table = new_table.copy() + self.multi_user_host_table = {} + + self._protocol.obfs.server_info.users = self.multi_user_table + + for id in self.multi_user_table: + self.multi_user_host_table[common.get_mu_host( + id, self.multi_user_table[id]['md5'])] = id + if self.multi_user_table[id]['forbidden_ip'] is not None: + self.multi_user_table[id]['_forbidden_iplist'] = IPNetwork( + str(self.multi_user_table[id]['forbidden_ip'])) + else: + self.multi_user_table[id][ + '_forbidden_iplist'] = IPNetwork(str("")) + if self.multi_user_table[id]['disconnect_ip'] is not None: + self.multi_user_table[id]['_disconnect_ipset'] = IPNetwork( + str(self.multi_user_table[id]['disconnect_ip'])) + else: + self.multi_user_table[id]['_disconnect_ipset'] = None + if self.multi_user_table[id]['forbidden_port'] is not None: + self.multi_user_table[id]['_forbidden_portset'] = PortRange( + str(self.multi_user_table[id]['forbidden_port'])) + else: + self.multi_user_table[id][ + '_forbidden_portset'] = PortRange(str("")) + + def push_relay_rules(self, rules): + self._is_pushing_relay_rules = True + self._relay_rules = rules.copy() + self._is_pushing_relay_rules = False + + def close(self, next_tick=False): + logging.debug('UDP close') + self._closed = True + if not next_tick: + if self._eventloop: + self._eventloop.remove_periodic(self.handle_periodic) + self._eventloop.remove(self._server_socket) + self._server_socket.close() + self._cache.clear(0) + self._cache_dns_client.clear(0) diff --git a/shadowsocks/version.py b/shadowsocks/version.py new file mode 100644 index 0000000..a809ee8 --- /dev/null +++ b/shadowsocks/version.py @@ -0,0 +1,19 @@ +#!/usr/bin/python +# 
-*- coding: utf-8 -*- +# +# Copyright 2016 breakwa11 +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +def version(): + return '3.4.0 mod by esdeathlove' diff --git a/speedtest/.gitignore b/speedtest/.gitignore new file mode 100644 index 0000000..f24cd99 --- /dev/null +++ b/speedtest/.gitignore @@ -0,0 +1,27 @@ +*.py[co] + +# Packages +*.egg +*.egg-info +dist +build +eggs +parts +bin +var +sdist +develop-eggs +.installed.cfg + +# Installer logs +pip-log.txt + +# Unit test / coverage reports +.coverage +.tox + +#Translations +*.mo + +#Mr Developer +.mr.developer.cfg diff --git a/speedtest/.travis.yml b/speedtest/.travis.yml new file mode 100644 index 0000000..8863201 --- /dev/null +++ b/speedtest/.travis.yml @@ -0,0 +1,37 @@ +language: python + +python: + - 2.7 + +addons: + apt: + sources: + - deadsnakes + packages: + - python2.4 + - python2.5 + - python2.6 + - pypy + +env: + - TOXENV=py24 + - TOXENV=py25 + - TOXENV=py26 + - TOXENV=py27 + - TOXENV=py32 + - TOXENV=py33 + - TOXENV=py34 + - TOXENV=py35 + - TOXENV=pypy + - TOXENV=flake8 + +install: + - if [[ $(echo "$TOXENV" | egrep -c "(py2[45]|py3[12])") != 0 ]]; then pip install virtualenv==1.7.2 tox==1.3; fi; + - if [[ $(echo "$TOXENV" | egrep -c "(py2[45]|py3[12])") == 0 ]]; then pip install tox; fi; + +script: + - tox + +notifications: + email: + - matt@sivel.net diff --git a/speedtest/CONTRIBUTING.md b/speedtest/CONTRIBUTING.md new file mode 100644 index 0000000..f40e03a --- /dev/null +++ b/speedtest/CONTRIBUTING.md 
@@ -0,0 +1,39 @@ +# Pull Requests + +## Pull requests should be + +1. Made against the `devel` branch. +1. Made from a git feature branch. + +## Pull requests will not be accepted that + +1. Are not made against the `devel` branch +1. Are submitted from a branch named `devel` +1. Do not pass pep8/pyflakes/flake8 +1. Do not work with Python 2.4-3.4 or pypy +1. Add python modules not included with the Python standard library +1. Are made by editing files via the GitHub website + +# Coding Guidelines + +In general, I follow strict pep8 and pyflakes. All code must pass these tests. Since we support python 2.4-3.4 and pypy, pyflakes reports unknown names in python 3. pyflakes is run in python 2.7 only in my tests. + +## Some other points + +1. Do not use `\` for line continuations, long strings should be wrapped in `()`. Imports should start a brand new line in the form of `from foo import...` +1. String quoting should be done with single quotes `'`, except for situations where you would otherwise have to escape an internal single quote +1. Docstrings should use three double quotes `"""` +1. All functions, classes and modules should have docstrings following both the PEP257 and PEP8 standards +1. Inline comments should only be used on code where it is not immediately obvious what the code achieves + +# Supported Python Versions + +All code needs to support Python 2.4-3.4 and pypy. + +# Permitted Python Modules + +Only modules included in the standard library are permitted for use in this application. This application should not be dependent on any 3rd party modules that would need to be installed external to just Python itself. + +# Testing + +Currently there are no unit tests, but they are planned. 
diff --git a/speedtest/LICENSE b/speedtest/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/speedtest/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/speedtest/MANIFEST.in b/speedtest/MANIFEST.in new file mode 100644 index 0000000..9d5d250 --- /dev/null +++ b/speedtest/MANIFEST.in @@ -0,0 +1,2 @@ +include LICENSE +include README.rst diff --git a/speedtest/README.rst b/speedtest/README.rst new file mode 100644 index 0000000..1cb1865 --- /dev/null +++ b/speedtest/README.rst @@ -0,0 +1,133 @@ +speedtest-cli +============= + +Command line interface for testing internet bandwidth using +speedtest.net + +.. image:: https://img.shields.io/pypi/v/speedtest-cli.svg + :target: https://pypi.python.org/pypi/speedtest-cli/ + :alt: Latest Version +.. image:: https://img.shields.io/pypi/dm/speedtest-cli.svg + :target: https://pypi.python.org/pypi/speedtest-cli/ + :alt: Downloads +.. 
image:: https://img.shields.io/pypi/l/speedtest-cli.svg + :target: https://pypi.python.org/pypi/speedtest-cli/ + :alt: License + +Versions +-------- + +speedtest-cli works with Python 2.4-3.5 + +.. image:: https://img.shields.io/pypi/pyversions/speedtest-cli.svg + :target: https://pypi.python.org/pypi/speedtest-cli/ + :alt: Versions + +Installation +------------ + +pip / easy\_install +~~~~~~~~~~~~~~~~~~~ + +:: + + pip install speedtest-cli + +or + +:: + + easy_install speedtest-cli + +Github +~~~~~~ + +:: + + pip install git+https://github.com/sivel/speedtest-cli.git + +or + +:: + + git clone https://github.com/sivel/speedtest-cli.git + python speedtest-cli/setup.py install + +Just download (Like the way it used to be) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +:: + + wget -O speedtest-cli https://raw.githubusercontent.com/sivel/speedtest-cli/master/speedtest.py + chmod +x speedtest-cli + +or + +:: + + curl -Lo speedtest-cli https://raw.githubusercontent.com/sivel/speedtest-cli/master/speedtest.py + chmod +x speedtest-cli + +Usage +----- + +:: + + $ speedtest-cli -h + usage: speedtest-cli [-h] [--bytes] [--share] [--simple] [--csv] + [--csv-delimiter CSV_DELIMITER] [--csv-header] [--json] + [--list] [--server SERVER] [--mini MINI] [--source SOURCE] + [--timeout TIMEOUT] [--secure] [--version] + + Command line interface for testing internet bandwidth using speedtest.net. + -------------------------------------------------------------------------- + https://github.com/sivel/speedtest-cli + + optional arguments: + -h, --help show this help message and exit + --bytes Display values in bytes instead of bits. Does not + affect the image generated by --share, nor output from + --json or --csv + --share Generate and provide a URL to the speedtest.net share + results image + --simple Suppress verbose output, only show basic information + --csv Suppress verbose output, only show basic information + in CSV format. 
Speeds listed in bit/s and not affected + by --bytes + --csv-delimiter CSV_DELIMITER + Single character delimiter to use in CSV output. + Default "," + --csv-header Print CSV headers + --json Suppress verbose output, only show basic information + in JSON format. Speeds listed in bit/s and not + affected by --bytes + --list Display a list of speedtest.net servers sorted by + distance + --server SERVER Specify a server ID to test against + --mini MINI URL of the Speedtest Mini server + --source SOURCE Source IP address to bind to + --timeout TIMEOUT HTTP timeout in seconds. Default 10 + --secure Use HTTPS instead of HTTP when communicating with + speedtest.net operated servers + --version Show the version number and exit + +Inconsistency +------------- + +It is not a goal of this application to be a reliable latency reporting tool. + +Latency reported by this tool should not be relied on as a value indicative of ICMP +style latency. It is a relative value used for determining the lowest latency server +for performing the actual speed test against. + +There is the potential for this tool to report results inconsistent with Speedtest.net. +There are several concepts to be aware of that factor into the potential inconsistency: + +1. Speedtest.net has migrated to using pure socket tests instead of HTTP based tests +2. This application is written in Python +3. Different versions of Python will execute certain parts of the code faster than others +4. CPU and Memory capacity and speed will play a large part in inconsistency between + Speedtest.net and even other machines on the same network + +Issues relating to inconsistencies will be closed as wontfix and without +additional reason or context. 
diff --git a/speedtest/__init__.py b/speedtest/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/speedtest/setup.cfg b/speedtest/setup.cfg new file mode 100644 index 0000000..7c964b4 --- /dev/null +++ b/speedtest/setup.cfg @@ -0,0 +1,2 @@ +[wheel] +universal=1 diff --git a/speedtest/setup.py b/speedtest/setup.py new file mode 100644 index 0000000..00a8054 --- /dev/null +++ b/speedtest/setup.py @@ -0,0 +1,94 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Copyright 2012-2016 Matt Martz +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os +import re +import codecs + +from setuptools import setup + +here = os.path.abspath(os.path.dirname(__file__)) + + +# Read the version number from a source file. +# Why read it, and not import? +# see https://groups.google.com/d/topic/pypa-dev/0PkjVpcxTzQ/discussion +def find_version(*file_paths): + # Open in Latin-1 so that we avoid encoding errors. 
+ # Use codecs.open for Python 2 compatibility + try: + f = codecs.open(os.path.join(here, *file_paths), 'r', 'latin1') + version_file = f.read() + f.close() + except: + raise RuntimeError("Unable to find version string.") + + # The version line must have the form + # __version__ = 'ver' + version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", + version_file, re.M) + if version_match: + return version_match.group(1) + raise RuntimeError("Unable to find version string.") + + +# Get the long description from the relevant file +try: + f = codecs.open('README.rst', encoding='utf-8') + long_description = f.read() + f.close() +except: + long_description = '' + + +setup( + name='speedtest-cli', + version=find_version('speedtest.py'), + description=('Command line interface for testing internet bandwidth using ' + 'speedtest.net'), + long_description=long_description, + keywords='speedtest speedtest.net', + author='Matt Martz', + author_email='matt@sivel.net', + url='https://github.com/sivel/speedtest-cli', + license='Apache License, Version 2.0', + py_modules=['speedtest', 'speedtest_cli'], + entry_points={ + 'console_scripts': [ + 'speedtest=speedtest:main', + 'speedtest-cli=speedtest:main' + ] + }, + classifiers=[ + 'Development Status :: 5 - Production/Stable', + 'Programming Language :: Python', + 'Environment :: Console', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: OS Independent', + 'Programming Language :: Python :: 2', + 'Programming Language :: Python :: 2.4', + 'Programming Language :: Python :: 2.5', + 'Programming Language :: Python :: 2.6', + 'Programming Language :: Python :: 2.7', + 'Programming Language :: Python :: 3', + 'Programming Language :: Python :: 3.1', + 'Programming Language :: Python :: 3.2', + 'Programming Language :: Python :: 3.3', + 'Programming Language :: Python :: 3.4', + 'Programming Language :: Python :: 3.5', + ] +) diff --git a/speedtest/speedtest-cli.1 b/speedtest/speedtest-cli.1 new file 
mode 100644 index 0000000..9e1befe --- /dev/null +++ b/speedtest/speedtest-cli.1 @@ -0,0 +1,118 @@ +.TH "speedtest-cli" 1 "2014-04-23" "speedtest-cli" +.SH NAME +speedtest\-cli \- Command line interface for testing internet bandwidth using speedtest.net +.SH SYNOPSIS +.B speedtest\-cli +[OPTION...] +.SH DESCRIPTION +Speedtest.net is a web service for testing your broadband connection by downloading a file +from a nearby speedtest.net server on the web. This tool allows you to access the service +from the command line. + +Speedtest mini is a version of the Speedtest.net server that you can host locally. + +.SH OPTIONS +Usage: speedtest\-cli [OPTION...] + +.B Help Options + +\fB\-h, \-\-help\fR +.RS +Displays usage for the tool. +.RE + +.B Options + +\fB\-\-bytes\fR +.RS +Display values in bytes instead of bits. Does not affect the image generated by \-\-share +.RE + +\fB\-\-share\fR +.RS +Generate and provide a URL to the speedtest.net share results image +.RE + +\fB\-\-simple\fR +.RS +Suppress verbose output, only show basic information +.RE + +\fB\-\-csv\fR +.RS +Suppress verbose output, only show basic information in CSV format. Speeds listed in bit/s and not affected by \-\-bytes +.RE + +\fB\-\-csv-delimiter CSV_DELIMITER\fR +.RS +Single character delimiter to use in CSV output. Default "," +.RE + +\fB\-\-csv-header\fR +.RS +Print CSV headers +.RE + +\fB\-\-json\fR +.RS +Suppress verbose output, only show basic information in JSON format. Speeds listed in bit/s and not affected by \-\-bytes +.RE + +\fB\-\-list\fR +.RS +Display a list of speedtest.net servers sorted by distance +.RE + +\fB\-\-server SERVER\fR +.RS +Specify a server ID to test against +.RE + +\fB\-\-mini MINI\fR +.RS +URL of the Speedtest Mini server +.RE + +\fB\-\-source SOURCE\fR +.RS +Source IP address to bind to +.RE + +\fB\-\-timeout TIMEOUT\fR +.RS +HTTP timeout in seconds. 
Default 10 +.RE + +\fB\-\-secure\fR +.RS +Use HTTPS instead of HTTP when communicating with speedtest.net operated servers +.RE + +\fB\-\-version\fR +.RS +Show the version number and exit +.RE + +.SH EXAMPLES + +\fBAutomatically find closest server and start testing\fR +.RS +speedtest\-cli +.RE + +\fBSpecify testing against server 1491\fR +.RS +speedtest-cli \-\-server 1491 +.RE + +\fBTesting against Speedtest Mini\fR +.RS +speedtest-cli \-\-mini 172.18.66.1 +.RE + +.SH REPORTING BUGS +Please file issues on the Github bug tracker: https://github.com/sivel/speedtest\-cli + +.SH AUTHORS +This manual page was written by Jonathan Carter +Speedtest\-cli was written by Matt Martz diff --git a/speedtest/speedtest.py b/speedtest/speedtest.py new file mode 100644 index 0000000..5c9d774 --- /dev/null +++ b/speedtest/speedtest.py @@ -0,0 +1,1431 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Copyright 2012-2016 Matt Martz +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import os +import re +import csv +import sys +import math +import errno +import signal +import socket +import timeit +import datetime +import platform +import threading +import xml.parsers.expat + +try: + import gzip + GZIP_BASE = gzip.GzipFile +except ImportError: + gzip = None + GZIP_BASE = object + +__version__ = '1.0.2' + + +class FakeShutdownEvent(object): + """Class to fake a threading.Event.isSet so that users of this module + are not required to register their own threading.Event() + """ + + @staticmethod + def isSet(): + "Dummy method to always return false""" + return False + + +# Some global variables we use +USER_AGENT = None +SOURCE = None +SHUTDOWN_EVENT = FakeShutdownEvent() +SCHEME = 'http' +DEBUG = False + +# Used for bound_interface +SOCKET_SOCKET = socket.socket + +# Begin import game to handle Python 2 and Python 3 +try: + import json +except ImportError: + try: + import simplejson as json + except ImportError: + json = None + +try: + import xml.etree.cElementTree as ET +except ImportError: + try: + import xml.etree.ElementTree as ET + except ImportError: + from xml.dom import minidom as DOM + ET = None + +try: + from urllib2 import urlopen, Request, HTTPError, URLError +except ImportError: + from urllib.request import urlopen, Request, HTTPError, URLError + +try: + from httplib import HTTPConnection +except ImportError: + from http.client import HTTPConnection + +try: + from httplib import HTTPSConnection +except ImportError: + try: + from http.client import HTTPSConnection + except ImportError: + HTTPSConnection = None + +try: + from Queue import Queue +except ImportError: + from queue import Queue + +try: + from urlparse import urlparse +except ImportError: + from urllib.parse import urlparse + +try: + from urlparse import parse_qs +except ImportError: + try: + from urllib.parse import parse_qs + except ImportError: + from cgi import parse_qs + +try: + from hashlib import md5 +except ImportError: + from md5 import md5 + +try: + from 
argparse import ArgumentParser as ArgParser + from argparse import SUPPRESS as ARG_SUPPRESS + PARSER_TYPE_INT = int + PARSER_TYPE_STR = str +except ImportError: + from optparse import OptionParser as ArgParser + from optparse import SUPPRESS_HELP as ARG_SUPPRESS + PARSER_TYPE_INT = 'int' + PARSER_TYPE_STR = 'string' + +try: + from cStringIO import StringIO + BytesIO = None +except ImportError: + try: + from io import StringIO, BytesIO + except ImportError: + from StringIO import StringIO + BytesIO = None + +try: + import builtins +except ImportError: + def print_(*args, **kwargs): + """The new-style print function for Python 2.4 and 2.5. + + Taken from https://pypi.python.org/pypi/six/ + + Modified to set encoding to UTF-8 if not set when stdout may not be + a tty such as when piping to head + """ + fp = kwargs.pop("file", sys.stdout) + if fp is None: + return + + def write(data): + if not isinstance(data, basestring): + data = str(data) + # If the file has an encoding, encode unicode with it. 
+ encoding = fp.encoding or 'UTF-8' # Diverges for notty + if (isinstance(fp, file) and + isinstance(data, unicode) and + encoding is not None): + errors = getattr(fp, "errors", None) + if errors is None: + errors = "strict" + data = data.encode(encoding, errors) + fp.write(data) + want_unicode = False + sep = kwargs.pop("sep", None) + if sep is not None: + if isinstance(sep, unicode): + want_unicode = True + elif not isinstance(sep, str): + raise TypeError("sep must be None or a string") + end = kwargs.pop("end", None) + if end is not None: + if isinstance(end, unicode): + want_unicode = True + elif not isinstance(end, str): + raise TypeError("end must be None or a string") + if kwargs: + raise TypeError("invalid keyword arguments to print()") + if not want_unicode: + for arg in args: + if isinstance(arg, unicode): + want_unicode = True + break + if want_unicode: + newline = unicode("\n") + space = unicode(" ") + else: + newline = "\n" + space = " " + if sep is None: + sep = space + if end is None: + end = newline + for i, arg in enumerate(args): + if i: + write(sep) + write(arg) + write(end) +else: + print_ = getattr(builtins, 'print') + del builtins + +# Exception "constants" to support Python 2 through Python 3 +try: + import ssl + try: + CERT_ERROR = (ssl.CertificateError,) + except AttributeError: + CERT_ERROR = tuple() + + HTTP_ERRORS = ((HTTPError, URLError, socket.error, ssl.SSLError) + + CERT_ERROR) +except ImportError: + HTTP_ERRORS = (HTTPError, URLError, socket.error) + + +class SpeedtestException(Exception): + """Base exception for this module""" + + +class SpeedtestCLIError(SpeedtestException): + """Generic exception for raising errors during CLI operation""" + + +class SpeedtestHTTPError(SpeedtestException): + """Base HTTP exception for this module""" + + +class SpeedtestConfigError(SpeedtestException): + """Configuration provided is invalid""" + + +class ConfigRetrievalError(SpeedtestHTTPError): + """Could not retrieve config.php""" + + +class 
ServersRetrievalError(SpeedtestHTTPError): + """Could not retrieve speedtest-servers.php""" + + +class InvalidServerIDType(SpeedtestException): + """Server ID used for filtering was not an integer""" + + +class NoMatchedServers(SpeedtestException): + """No servers matched when filtering""" + + +class SpeedtestMiniConnectFailure(SpeedtestException): + """Could not connect to the provided speedtest mini server""" + + +class InvalidSpeedtestMiniServer(SpeedtestException): + """Server provided as a speedtest mini server does not actually appear + to be a speedtest mini server + """ + + +class ShareResultsConnectFailure(SpeedtestException): + """Could not connect to speedtest.net API to POST results""" + + +class ShareResultsSubmitFailure(SpeedtestException): + """Unable to successfully POST results to speedtest.net API after + connection + """ + + +class SpeedtestUploadTimeout(SpeedtestException): + """testlength configuration reached during upload + Used to ensure the upload halts when no additional data should be sent + """ + + +class SpeedtestBestServerFailure(SpeedtestException): + """Unable to determine best server""" + + +class GzipDecodedResponse(GZIP_BASE): + """A file-like object to decode a response encoded with the gzip + method, as described in RFC 1952. 
+ + Largely copied from ``xmlrpclib``/``xmlrpc.client`` and modified + to work for py2.4-py3 + """ + + def __init__(self, response): + # response doesn't support tell() and read(), required by + # GzipFile + if not gzip: + raise SpeedtestHTTPError('HTTP response body is gzip encoded, ' + 'but gzip support is not available') + IO = BytesIO or StringIO + self.io = IO() + while True: + chunk = response.read(1024) + if len(chunk) == 0: + break + self.io.write(chunk) + self.io.seek(0) + gzip.GzipFile.__init__(self, mode='rb', fileobj=self.io) + + def close(self): + try: + gzip.GzipFile.close(self) + finally: + self.io.close() + + +def get_exception(): + """Helper function to work with py2.4-py3 for getting the current + exception in a try/except block + """ + return sys.exc_info()[1] + + +def bound_socket(*args, **kwargs): + """Bind socket to a specified source IP address""" + + sock = SOCKET_SOCKET(*args, **kwargs) + sock.bind((SOURCE, 0)) + return sock + + +def distance(origin, destination): + """Determine distance between 2 sets of [lat,lon] in km""" + + lat1, lon1 = origin + lat2, lon2 = destination + radius = 6371 # km + + dlat = math.radians(lat2 - lat1) + dlon = math.radians(lon2 - lon1) + a = (math.sin(dlat / 2) * math.sin(dlat / 2) + + math.cos(math.radians(lat1)) * + math.cos(math.radians(lat2)) * math.sin(dlon / 2) * + math.sin(dlon / 2)) + c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a)) + d = radius * c + + return d + + +def build_user_agent(): + """Build a Mozilla/5.0 compatible User-Agent string""" + + global USER_AGENT + if USER_AGENT: + return USER_AGENT + + ua_tuple = ( + 'Mozilla/5.0', + '(%s; U; %s; en-us)' % (platform.system(), platform.architecture()[0]), + 'Python/%s' % platform.python_version(), + '(KHTML, like Gecko)', + 'speedtest-cli/%s' % __version__ + ) + USER_AGENT = ' '.join(ua_tuple) + printer(USER_AGENT, debug=True) + return USER_AGENT + + +def build_request(url, data=None, headers=None, bump=''): + """Build a urllib2 request object + 
+ This function automatically adds a User-Agent header to all requests + + """ + + if not USER_AGENT: + build_user_agent() + + if not headers: + headers = {} + + if url[0] == ':': + schemed_url = '%s%s' % (SCHEME, url) + else: + schemed_url = url + + if '?' in url: + delim = '&' + else: + delim = '?' + + # WHO YOU GONNA CALL? CACHE BUSTERS! + final_url = '%s%sx=%s.%s' % (schemed_url, delim, + int(timeit.time.time() * 1000), + bump) + + headers.update({ + 'User-Agent': USER_AGENT, + 'Cache-Control': 'no-cache', + }) + + printer('%s %s' % (('GET', 'POST')[bool(data)], final_url), + debug=True) + + return Request(final_url, data=data, headers=headers) + + +def catch_request(request): + """Helper function to catch common exceptions encountered when + establishing a connection with a HTTP/HTTPS request + + """ + + try: + uh = urlopen(request) + return uh, False + except HTTP_ERRORS: + e = get_exception() + return None, e + + +def get_response_stream(response): + """Helper function to return either a Gzip reader if + ``Content-Encoding`` is ``gzip`` otherwise the response itself + + """ + + try: + getheader = response.headers.getheader + except AttributeError: + getheader = response.getheader + + if getheader('content-encoding') == 'gzip': + return GzipDecodedResponse(response) + + return response + + +def get_attributes_by_tag_name(dom, tag_name): + """Retrieve an attribute from an XML document and return it in a + consistent format + + Only used with xml.dom.minidom, which is likely only to be used + with python versions older than 2.5 + """ + elem = dom.getElementsByTagName(tag_name)[0] + return dict(list(elem.attributes.items())) + + +def print_dots(current, total, start=False, end=False): + """Built in callback function used by Thread classes for printing + status + """ + + if SHUTDOWN_EVENT.isSet(): + return + + sys.stdout.write('.') + if current + 1 == total and end is True: + sys.stdout.write('\n') + sys.stdout.flush() + + +def do_nothing(*args, **kwargs): + pass 
+ + +class HTTPDownloader(threading.Thread): + """Thread class for retrieving a URL""" + + def __init__(self, i, request, start, timeout): + threading.Thread.__init__(self) + self.request = request + self.result = [0] + self.starttime = start + self.timeout = timeout + self.i = i + + def run(self): + try: + if (timeit.default_timer() - self.starttime) <= self.timeout: + f = urlopen(self.request) + while (not SHUTDOWN_EVENT.isSet() and + (timeit.default_timer() - self.starttime) <= + self.timeout): + self.result.append(len(f.read(10240))) + if self.result[-1] == 0: + break + f.close() + except IOError: + pass + + +class HTTPUploaderData(object): + """File like object to improve cutting off the upload once the timeout + has been reached + """ + + def __init__(self, length, start, timeout): + self.length = length + self.start = start + self.timeout = timeout + + self._data = None + + self.total = [0] + + def _create_data(self): + chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ' + multiplier = int(round(int(self.length) / 36.0)) + IO = BytesIO or StringIO + self._data = IO( + ('content1=%s' % + (chars * multiplier)[0:int(self.length) - 9] + ).encode() + ) + + @property + def data(self): + if not self._data: + self._create_data() + return self._data + + def read(self, n=10240): + if ((timeit.default_timer() - self.start) <= self.timeout and + not SHUTDOWN_EVENT.isSet()): + chunk = self.data.read(n) + self.total.append(len(chunk)) + return chunk + else: + raise SpeedtestUploadTimeout + + def __len__(self): + return self.length + + +class HTTPUploader(threading.Thread): + """Thread class for putting a URL""" + + def __init__(self, i, request, start, size, timeout): + threading.Thread.__init__(self) + self.request = request + self.request.data.start = self.starttime = start + self.size = size + self.result = None + self.timeout = timeout + self.i = i + + def run(self): + request = self.request + try: + if ((timeit.default_timer() - self.starttime) <= self.timeout and + not 
SHUTDOWN_EVENT.isSet()): + try: + f = urlopen(request) + except TypeError: + # PY24 expects a string or buffer + # This also causes issues with Ctrl-C, but we will concede + # for the moment that Ctrl-C on PY24 isn't immediate + request = build_request(self.request.get_full_url(), + data=request.data.read(self.size)) + f = urlopen(request) + f.read(11) + f.close() + self.result = sum(self.request.data.total) + else: + self.result = 0 + except (IOError, SpeedtestUploadTimeout): + self.result = sum(self.request.data.total) + + +class SpeedtestResults(object): + """Class for holding the results of a speedtest, including: + + Download speed + Upload speed + Ping/Latency to test server + Data about server that the test was run against + + Additionally this class can return a result data as a dictionary or CSV, + as well as submit a POST of the result data to the speedtest.net API + to get a share results image link. + """ + + def __init__(self, download=0, upload=0, ping=0, server=None): + self.download = download + self.upload = upload + self.ping = ping + if server is None: + self.server = {} + else: + self.server = server + self._share = None + self.timestamp = datetime.datetime.utcnow().isoformat() + self.bytes_received = 0 + self.bytes_sent = 0 + + def __repr__(self): + return repr(self.dict()) + + def share(self): + """POST data to the speedtest.net API to obtain a share results + link + """ + + if self._share: + return self._share + + download = int(round(self.download / 1000.0, 0)) + ping = int(round(self.ping, 0)) + upload = int(round(self.upload / 1000.0, 0)) + + # Build the request to send results back to speedtest.net + # We use a list instead of a dict because the API expects parameters + # in a certain order + api_data = [ + 'recommendedserverid=%s' % self.server['id'], + 'ping=%s' % ping, + 'screenresolution=', + 'promo=', + 'download=%s' % download, + 'screendpi=', + 'upload=%s' % upload, + 'testmethod=http', + 'hash=%s' % md5(('%s-%s-%s-%s' % + (ping, 
upload, download, '297aae72')) + .encode()).hexdigest(), + 'touchscreen=none', + 'startmode=pingselect', + 'accuracy=1', + 'bytesreceived=%s' % self.bytes_received, + 'bytessent=%s' % self.bytes_sent, + 'serverid=%s' % self.server['id'], + ] + + headers = {'Referer': 'http://c.speedtest.net/flash/speedtest.swf'} + request = build_request('://www.speedtest.net/api/api.php', + data='&'.join(api_data).encode(), + headers=headers) + f, e = catch_request(request) + if e: + raise ShareResultsConnectFailure(e) + + response = f.read() + code = f.code + f.close() + + if int(code) != 200: + raise ShareResultsSubmitFailure('Could not submit results to ' + 'speedtest.net') + + qsargs = parse_qs(response.decode()) + resultid = qsargs.get('resultid') + if not resultid or len(resultid) != 1: + raise ShareResultsSubmitFailure('Could not submit results to ' + 'speedtest.net') + + self._share = 'http://www.speedtest.net/result/%s.png' % resultid[0] + + return self._share + + def dict(self): + """Return dictionary of result data""" + + return { + 'download': self.download, + 'upload': self.upload, + 'ping': self.ping, + 'server': self.server, + 'timestamp': self.timestamp + } + + def csv(self, delimiter=','): + """Return data in CSV format""" + + data = self.dict() + out = StringIO() + writer = csv.writer(out, delimiter=delimiter, lineterminator='') + writer.writerow([data['server']['id'], data['server']['sponsor'], + data['server']['name'], data['timestamp'], + data['server']['d'], data['ping'], data['download'], + data['upload']]) + return out.getvalue() + + def json(self, pretty=False): + """Return data in JSON format""" + + kwargs = {} + if pretty: + kwargs.update({ + 'indent': 4, + 'sort_keys': True + }) + return json.dumps(self.dict(), **kwargs) + + +class Speedtest(object): + """Class for performing standard speedtest.net testing operations""" + + def __init__(self, config=None): + self.config = {} + self.get_config() + if config is not None: + self.config.update(config) + + 
self.servers = {} + self.closest = [] + self.best = {} + + self.results = SpeedtestResults() + + def get_config(self): + """Download the speedtest.net configuration and return only the data + we are interested in + """ + + headers = {} + if gzip: + headers['Accept-Encoding'] = 'gzip' + request = build_request('://www.speedtest.net/speedtest-config.php', + headers=headers) + uh, e = catch_request(request) + if e: + raise ConfigRetrievalError(e) + configxml = [] + + stream = get_response_stream(uh) + + while True: + configxml.append(stream.read(1024)) + if len(configxml[-1]) == 0: + break + stream.close() + uh.close() + + if int(uh.code) != 200: + return None + + printer(''.encode().join(configxml), debug=True) + + try: + root = ET.fromstring(''.encode().join(configxml)) + server_config = root.find('server-config').attrib + download = root.find('download').attrib + upload = root.find('upload').attrib + # times = root.find('times').attrib + client = root.find('client').attrib + + except AttributeError: + root = DOM.parseString(''.join(configxml)) + server_config = get_attributes_by_tag_name(root, 'server-config') + download = get_attributes_by_tag_name(root, 'download') + upload = get_attributes_by_tag_name(root, 'upload') + # times = get_attributes_by_tag_name(root, 'times') + client = get_attributes_by_tag_name(root, 'client') + + ignore_servers = list( + map(int, server_config['ignoreids'].split(',')) + ) + + ratio = int(upload['ratio']) + upload_max = int(upload['maxchunkcount']) + up_sizes = [32768, 65536, 131072, 262144, 524288, 1048576, 7340032] + sizes = { + 'upload': up_sizes[ratio - 1:], + 'download': [350, 500, 750, 1000, 1500, 2000, 2500, + 3000, 3500, 4000] + } + + counts = { + 'upload': int(upload_max * 2 / len(sizes['upload'])), + 'download': int(download['threadsperurl']) + } + + threads = { + 'upload': int(upload['threads']), + 'download': int(server_config['threadcount']) * 2 + } + + length = { + 'upload': int(upload['testlength']), + 'download': 
int(download['testlength']) + } + + self.config.update({ + 'client': client, + 'ignore_servers': ignore_servers, + 'sizes': sizes, + 'counts': counts, + 'threads': threads, + 'length': length, + 'upload_max': upload_max + }) + + self.lat_lon = (float(client['lat']), float(client['lon'])) + + return self.config + + def get_servers(self, servers=None): + """Retrieve a the list of speedtest.net servers, optionally filtered + to servers matching those specified in the ``servers`` argument + """ + if servers is None: + servers = [] + + self.servers.clear() + + for i, s in enumerate(servers): + try: + servers[i] = int(s) + except ValueError: + raise InvalidServerIDType('%s is an invalid server type, must ' + 'be int' % s) + + urls = [ + '://www.speedtest.net/speedtest-servers-static.php', + 'http://c.speedtest.net/speedtest-servers-static.php', + '://www.speedtest.net/speedtest-servers.php', + 'http://c.speedtest.net/speedtest-servers.php', + ] + + headers = {} + if gzip: + headers['Accept-Encoding'] = 'gzip' + + errors = [] + for url in urls: + try: + request = build_request('%s?threads=%s' % + (url, + self.config['threads']['download']), + headers=headers) + uh, e = catch_request(request) + if e: + errors.append('%s' % e) + raise ServersRetrievalError + + stream = get_response_stream(uh) + + serversxml = [] + while True: + serversxml.append(stream.read(1024)) + if len(serversxml[-1]) == 0: + break + + stream.close() + uh.close() + + if int(uh.code) != 200: + raise ServersRetrievalError + + printer(''.encode().join(serversxml), debug=True) + + try: + try: + root = ET.fromstring(''.encode().join(serversxml)) + elements = root.getiterator('server') + except AttributeError: + root = DOM.parseString(''.join(serversxml)) + elements = root.getElementsByTagName('server') + except (SyntaxError, xml.parsers.expat.ExpatError): + raise ServersRetrievalError + + for server in elements: + try: + attrib = server.attrib + except AttributeError: + attrib = 
dict(list(server.attributes.items())) + + if servers and int(attrib.get('id')) not in servers: + continue + + if int(attrib.get('id')) in self.config['ignore_servers']: + continue + + try: + d = distance(self.lat_lon, + (float(attrib.get('lat')), + float(attrib.get('lon')))) + except: + continue + + attrib['d'] = d + + try: + self.servers[d].append(attrib) + except KeyError: + self.servers[d] = [attrib] + + printer(''.encode().join(serversxml), debug=True) + + break + + except ServersRetrievalError: + continue + + if servers and not self.servers: + raise NoMatchedServers + + return self.servers + + def set_mini_server(self, server): + """Instead of querying for a list of servers, set a link to a + speedtest mini server + """ + + urlparts = urlparse(server) + + name, ext = os.path.splitext(urlparts[2]) + if ext: + url = os.path.dirname(server) + else: + url = server + + request = build_request(url) + uh, e = catch_request(request) + if e: + raise SpeedtestMiniConnectFailure('Failed to connect to %s' % + server) + else: + text = uh.read() + uh.close() + + extension = re.findall('upload_?[Ee]xtension: "([^"]+)"', + text.decode()) + if not extension: + for ext in ['php', 'asp', 'aspx', 'jsp']: + try: + f = urlopen('%s/speedtest/upload.%s' % (url, ext)) + except: + pass + else: + data = f.read().strip().decode() + if (f.code == 200 and + len(data.splitlines()) == 1 and + re.match('size=[0-9]', data)): + extension = [ext] + break + if not urlparts or not extension: + raise InvalidSpeedtestMiniServer('Invalid Speedtest Mini Server: ' + '%s' % server) + + self.servers = [{ + 'sponsor': 'Speedtest Mini', + 'name': urlparts[1], + 'd': 0, + 'url': '%s/speedtest/upload.%s' % (url.rstrip('/'), extension[0]), + 'latency': 0, + 'id': 0 + }] + + return self.servers + + def get_closest_servers(self, limit=5): + """Limit servers to the closest speedtest.net servers based on + geographic distance + """ + + if not self.servers: + self.get_servers() + + for d in 
sorted(self.servers.keys()): + for s in self.servers[d]: + self.closest.append(s) + if len(self.closest) == limit: + break + else: + continue + break + + printer(self.closest, debug=True) + return self.closest + + def get_best_server(self, servers=None): + """Perform a speedtest.net "ping" to determine which speedtest.net + server has the lowest latency + """ + + if not servers: + if not self.closest: + servers = self.get_closest_servers() + servers = self.closest + + results = {} + for server in servers: + cum = [] + url = os.path.dirname(server['url']) + urlparts = urlparse('%s/latency.txt' % url) + printer('%s %s/latency.txt' % ('GET', url), debug=True) + for _ in range(0, 3): + try: + if urlparts[0] == 'https': + h = HTTPSConnection(urlparts[1]) + else: + h = HTTPConnection(urlparts[1]) + headers = {'User-Agent': USER_AGENT} + start = timeit.default_timer() + h.request("GET", urlparts[2], headers=headers) + r = h.getresponse() + total = (timeit.default_timer() - start) + except HTTP_ERRORS: + e = get_exception() + printer('%r' % e, debug=True) + cum.append(3600) + continue + + text = r.read(9) + if int(r.status) == 200 and text == 'test=test'.encode(): + cum.append(total) + else: + cum.append(3600) + h.close() + + avg = round((sum(cum) / 6) * 1000.0, 3) + results[avg] = server + + try: + fastest = sorted(results.keys())[0] + except IndexError: + raise SpeedtestBestServerFailure('Unable to connect to servers to ' + 'test latency.') + best = results[fastest] + best['latency'] = fastest + + self.results.ping = fastest + self.results.server = best + + self.best.update(best) + printer(best, debug=True) + return best + + def download(self, callback=do_nothing): + """Test download speed against speedtest.net""" + + urls = [] + for size in self.config['sizes']['download']: + for _ in range(0, self.config['counts']['download']): + urls.append('%s/random%sx%s.jpg' % + (os.path.dirname(self.best['url']), size, size)) + + request_count = len(urls) + requests = [] + for i, 
url in enumerate(urls): + requests.append(build_request(url, bump=i)) + + def producer(q, requests, request_count): + for i, request in enumerate(requests): + thread = HTTPDownloader(i, request, start, + self.config['length']['download']) + thread.start() + q.put(thread, True) + callback(i, request_count, start=True) + + finished = [] + + def consumer(q, request_count): + while len(finished) < request_count: + thread = q.get(True) + while thread.isAlive(): + thread.join(timeout=0.1) + finished.append(sum(thread.result)) + callback(thread.i, request_count, end=True) + + q = Queue(self.config['threads']['download']) + prod_thread = threading.Thread(target=producer, + args=(q, requests, request_count)) + cons_thread = threading.Thread(target=consumer, + args=(q, request_count)) + start = timeit.default_timer() + prod_thread.start() + cons_thread.start() + while prod_thread.isAlive(): + prod_thread.join(timeout=0.1) + while cons_thread.isAlive(): + cons_thread.join(timeout=0.1) + + stop = timeit.default_timer() + self.results.bytes_received = sum(finished) + self.results.download = ( + (self.results.bytes_received / (stop - start)) * 8.0 + ) + if self.results.download > 100000: + self.config['threads']['upload'] = 8 + return self.results.download + + def upload(self, callback=do_nothing): + """Test upload speed against speedtest.net""" + + sizes = [] + + for size in self.config['sizes']['upload']: + for _ in range(0, self.config['counts']['upload']): + sizes.append(size) + + # request_count = len(sizes) + request_count = self.config['upload_max'] + + requests = [] + for i, size in enumerate(sizes): + # We set ``0`` for ``start`` and handle setting the actual + # ``start`` in ``HTTPUploader`` to get better measurements + requests.append( + ( + build_request( + self.best['url'], + HTTPUploaderData(size, 0, + self.config['length']['upload']) + ), + size + ) + ) + + def producer(q, requests, request_count): + for i, request in enumerate(requests[:request_count]): + thread 
= HTTPUploader(i, request[0], start, request[1], + self.config['length']['upload']) + thread.start() + q.put(thread, True) + callback(i, request_count, start=True) + + finished = [] + + def consumer(q, request_count): + while len(finished) < request_count: + thread = q.get(True) + while thread.isAlive(): + thread.join(timeout=0.1) + finished.append(thread.result) + callback(thread.i, request_count, end=True) + + q = Queue(self.config['threads']['upload']) + prod_thread = threading.Thread(target=producer, + args=(q, requests, request_count)) + cons_thread = threading.Thread(target=consumer, + args=(q, request_count)) + start = timeit.default_timer() + prod_thread.start() + cons_thread.start() + while prod_thread.isAlive(): + prod_thread.join(timeout=0.1) + while cons_thread.isAlive(): + cons_thread.join(timeout=0.1) + + stop = timeit.default_timer() + self.results.bytes_sent = sum(finished) + self.results.upload = ( + (self.results.bytes_sent / (stop - start)) * 8.0 + ) + return self.results.upload + + +def ctrl_c(signum, frame): + """Catch Ctrl-C key sequence and set a SHUTDOWN_EVENT for our threaded + operations + """ + + SHUTDOWN_EVENT.set() + print_('\nCancelling...') + sys.exit(0) + + +def version(): + """Print the version""" + + print_(__version__) + sys.exit(0) + + +def csv_header(): + """Print the CSV Headers""" + + print_('Server ID,Sponsor,Server Name,Timestamp,Distance,Ping,Download,' + 'Upload') + sys.exit(0) + + +def parse_args(): + """Function to handle building and parsing of command line arguments""" + description = ( + 'Command line interface for testing internet bandwidth using ' + 'speedtest.net.\n' + '------------------------------------------------------------' + '--------------\n' + 'https://github.com/sivel/speedtest-cli') + + parser = ArgParser(description=description) + # Give optparse.OptionParser an `add_argument` method for + # compatibility with argparse.ArgumentParser + try: + parser.add_argument = parser.add_option + except 
AttributeError: + pass + parser.add_argument('--bytes', dest='units', action='store_const', + const=('byte', 8), default=('bit', 1), + help='Display values in bytes instead of bits. Does ' + 'not affect the image generated by --share, nor ' + 'output from --json or --csv') + parser.add_argument('--share', action='store_true', + help='Generate and provide a URL to the speedtest.net ' + 'share results image') + parser.add_argument('--simple', action='store_true', default=False, + help='Suppress verbose output, only show basic ' + 'information') + parser.add_argument('--csv', action='store_true', default=False, + help='Suppress verbose output, only show basic ' + 'information in CSV format. Speeds listed in ' + 'bit/s and not affected by --bytes') + parser.add_argument('--csv-delimiter', default=',', type=PARSER_TYPE_STR, + help='Single character delimiter to use in CSV ' + 'output. Default ","') + parser.add_argument('--csv-header', action='store_true', default=False, + help='Print CSV headers') + parser.add_argument('--json', action='store_true', default=False, + help='Suppress verbose output, only show basic ' + 'information in JSON format. Speeds listed in ' + 'bit/s and not affected by --bytes') + parser.add_argument('--list', action='store_true', + help='Display a list of speedtest.net servers ' + 'sorted by distance') + parser.add_argument('--server', help='Specify a server ID to test against', + type=PARSER_TYPE_INT) + parser.add_argument('--mini', help='URL of the Speedtest Mini server') + parser.add_argument('--source', help='Source IP address to bind to') + parser.add_argument('--timeout', default=10, type=PARSER_TYPE_INT, + help='HTTP timeout in seconds. 
Default 10') + parser.add_argument('--secure', action='store_true', + help='Use HTTPS instead of HTTP when communicating ' + 'with speedtest.net operated servers') + parser.add_argument('--version', action='store_true', + help='Show the version number and exit') + parser.add_argument('--debug', action='store_true', + help=ARG_SUPPRESS, default=ARG_SUPPRESS) + + options = parser.parse_args() + if isinstance(options, tuple): + args = options[0] + else: + args = options + return args + + +def validate_optional_args(args): + """Check if an argument was provided that depends on a module that may + not be part of the Python standard library. + + If such an argument is supplied, and the module does not exist, exit + with an error stating which module is missing. + """ + optional_args = { + 'json': ('json/simplejson python module', json), + 'secure': ('SSL support', HTTPSConnection), + } + + for arg, info in optional_args.items(): + if getattr(args, arg, False) and info[1] is None: + raise SystemExit('%s is not installed. 
--%s is ' + 'unavailable' % (info[0], arg)) + + +def printer(string, quiet=False, debug=False, **kwargs): + """Helper function to print a string only when not quiet""" + + if debug and not DEBUG: + return + + if debug: + out = '\033[1;30mDEBUG: %s\033[0m' % string + else: + out = string + + if not quiet: + print_(out, **kwargs) + + +def shell(): + """Run the full speedtest.net test""" + + global SHUTDOWN_EVENT, SOURCE, SCHEME, DEBUG + SHUTDOWN_EVENT = threading.Event() + + signal.signal(signal.SIGINT, ctrl_c) + + args = parse_args() + + # Print the version and exit + if args.version: + version() + + if args.csv_header: + csv_header() + + if len(args.csv_delimiter) != 1: + raise SystemExit('--csv-delimiter must be a single character') + + validate_optional_args(args) + + socket.setdefaulttimeout(args.timeout) + + # If specified bind to a specific IP address + if args.source: + SOURCE = args.source + socket.socket = bound_socket + + if args.secure: + SCHEME = 'https' + + debug = getattr(args, 'debug', False) + if debug == 'SUPPRESSHELP': + debug = False + if debug: + DEBUG = True + + # Pre-cache the user agent string + build_user_agent() + + if args.simple or args.csv or args.json: + quiet = True + else: + quiet = False + + # Don't set a callback if we are running quietly + if quiet or debug: + callback = do_nothing + else: + callback = print_dots + + printer('Retrieving speedtest.net configuration...', quiet) + try: + speedtest = Speedtest() + except (ConfigRetrievalError, HTTP_ERRORS): + printer('Cannot retrieve speedtest configuration') + raise SpeedtestCLIError(get_exception()) + + if args.list: + try: + speedtest.get_servers() + except (ServersRetrievalError, HTTP_ERRORS): + print_('Cannot retrieve speedtest server list') + raise SpeedtestCLIError(get_exception()) + + for _, servers in sorted(speedtest.servers.items()): + for server in servers: + line = ('%(id)5s) %(sponsor)s (%(name)s, %(country)s) ' + '[%(d)0.2f km]' % server) + try: + print_(line) + except 
IOError: + e = get_exception() + if e.errno != errno.EPIPE: + raise + sys.exit(0) + + # Set a filter of servers to retrieve + servers = [] + if args.server: + servers.append(args.server) + + printer('Testing from %(isp)s (%(ip)s)...' % speedtest.config['client'], + quiet) + + if not args.mini: + printer('Retrieving speedtest.net server list...', quiet) + try: + speedtest.get_servers(servers) + except NoMatchedServers: + raise SpeedtestCLIError('No matched servers: %s' % args.server) + except (ServersRetrievalError, HTTP_ERRORS): + print_('Cannot retrieve speedtest server list') + raise SpeedtestCLIError(get_exception()) + except InvalidServerIDType: + raise SpeedtestCLIError('%s is an invalid server type, must ' + 'be an int' % args.server) + + printer('Selecting best server based on ping...', quiet) + speedtest.get_best_server() + elif args.mini: + speedtest.get_best_server(speedtest.set_mini_server(args.mini)) + + results = speedtest.results + + printer('Hosted by %(sponsor)s (%(name)s) [%(d)0.2f km]: ' + '%(latency)s ms' % results.server, quiet) + + printer('Testing download speed', quiet, + end=('', '\n')[bool(debug)]) + speedtest.download(callback=callback) + printer('Download: %0.2f M%s/s' % + ((results.download / 1000.0 / 1000.0) / args.units[1], + args.units[0]), + quiet) + + printer('Testing upload speed', quiet, + end=('', '\n')[bool(debug)]) + speedtest.upload(callback=callback) + printer('Upload: %0.2f M%s/s' % + ((results.upload / 1000.0 / 1000.0) / args.units[1], + args.units[0]), + quiet) + + if args.simple: + print_('Ping: %s ms\nDownload: %0.2f M%s/s\nUpload: %0.2f M%s/s' % + (results.ping, + (results.download / 1000.0 / 1000.0) / args.units[1], + args.units[0], + (results.upload / 1000.0 / 1000.0) / args.units[1], + args.units[0])) + elif args.csv: + print_(results.csv(delimiter=args.csv_delimiter)) + elif args.json: + print_(results.json()) + + if args.share: + printer('Share results: %s' % results.share(), quiet) + + +def main(): + try: + 
shell() + except KeyboardInterrupt: + print_('\nCancelling...') + except (SpeedtestException, SystemExit): + e = get_exception() + if getattr(e, 'code', 1) != 0: + raise SystemExit('ERROR: %s' % e) + + +if __name__ == '__main__': + main() + +# vim:ts=4:sw=4:expandtab diff --git a/speedtest/speedtest_cli.py b/speedtest/speedtest_cli.py new file mode 100644 index 0000000..1d0fb29 --- /dev/null +++ b/speedtest/speedtest_cli.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +# Copyright 2012-2016 Matt Martz +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import warnings + +DEPRECATED_MSG = ('The file speedtest_cli.py has been deprecated in favor of ' + 'speedtest.py\nand is available for download at:\n\n' + 'https://raw.githubusercontent.com/sivel/speedtest-cli/' + 'master/speedtest.py') + + +if __name__ == '__main__': + raise SystemExit(DEPRECATED_MSG) +else: + try: + from speedtest import * + except ImportError: + raise SystemExit(DEPRECATED_MSG) + else: + warnings.warn(DEPRECATED_MSG, UserWarning) diff --git a/speedtest/tox.ini b/speedtest/tox.ini new file mode 100644 index 0000000..477fa1c --- /dev/null +++ b/speedtest/tox.ini @@ -0,0 +1,21 @@ +[tox] +skipsdist=true + +[testenv] +commands = + {envpython} -V + {envpython} -m compileall speedtest.py + {envpython} speedtest.py + +[testenv:flake8] +basepython=python +deps=flake8 +commands = + {envpython} -V + flake8 speedtest.py + +[testenv:pypy] +commands = + pypy -V + pypy -m compileall speedtest.py + pypy speedtest.py diff --git a/speedtest_thread.py b/speedtest_thread.py new file mode 100644 index 0000000..e8a9e65 --- /dev/null +++ b/speedtest_thread.py @@ -0,0 +1,194 @@ +#!/usr/bin/python +# -*- coding: UTF-8 -*- + +import logging +import time +import sys +import os +import configloader +import importloader +from speedtest import speedtest +from shadowsocks import common, shell + +class Speedtest(object): + + def __init__(self): + import threading + self.event = threading.Event() + self.has_stopped = False + + def speedtest_thread(self): + if self.event.wait(600): + return + + logging.info("Speedtest starting...You can't stop right now!") + CTid = 0 + speedtest_ct = speedtest.Speedtest() + speedtest_ct.get_servers() + servers_list = [] + for _, servers in sorted(speedtest_ct.servers.items()): + for server in servers: + if server['country'].find( + 'China') != -1 and server['sponsor'].find('Telecom') != -1: + servers_list.append(server) + speedtest_ct.get_best_server(servers_list) + results_ct = speedtest_ct.results + CTPing = 
str(results_ct.server['latency']) + ' ms' + speedtest_ct.download() + CTDLSpeed = str( + round( + (results_ct.download / 1000 / 1000), + 2)) + " Mbit/s" + speedtest_ct.upload() + CTUpSpeed = str( + round( + (results_ct.upload / 1000 / 1000), + 2)) + " Mbit/s" + + CUid = 0 + speedtest_cu = speedtest.Speedtest() + speedtest_cu.get_servers() + servers_list = [] + for _, servers in sorted(speedtest_cu.servers.items()): + for server in servers: + if server['country'].find( + 'China') != -1 and server['sponsor'].find('Unicom') != -1: + servers_list.append(server) + speedtest_cu.get_best_server(servers_list) + results_cu = speedtest_cu.results + CUPing = str(results_cu.server['latency']) + ' ms' + speedtest_cu.download() + CUDLSpeed = str( + round( + (results_cu.download / 1000 / 1000), + 2)) + " Mbit/s" + speedtest_cu.upload() + CUUpSpeed = str( + round( + (results_cu.upload / 1000 / 1000), + 2)) + " Mbit/s" + + CMid = 0 + speedtest_cm = speedtest.Speedtest() + speedtest_cm.get_servers() + servers_list = [] + for _, servers in sorted(speedtest_cm.servers.items()): + for server in servers: + if server['country'].find( + 'China') != -1 and server['sponsor'].find('Mobile') != -1: + servers_list.append(server) + speedtest_cm.get_best_server(servers_list) + results_cm = speedtest_cm.results + CMPing = str(results_cm.server['latency']) + ' ms' + speedtest_cm.download() + CMDLSpeed = str( + round( + (results_cm.download / 1000 / 1000), + 2)) + " Mbit/s" + speedtest_cm.upload() + CMUpSpeed = str( + round( + (results_cm.upload / 1000 / 1000), + 2)) + " Mbit/s" + + if configloader.get_config().API_INTERFACE == 'modwebapi': + webapi.postApi('func/speedtest', + {'node_id': configloader.get_config().NODE_ID}, + {'data': [{'telecomping': CTPing, + 'telecomeupload': CTUpSpeed, + 'telecomedownload': CTDLSpeed, + 'unicomping': CUPing, + 'unicomupload': CUUpSpeed, + 'unicomdownload': CUDLSpeed, + 'cmccping': CMPing, + 'cmccupload': CMUpSpeed, + 'cmccdownload': CMDLSpeed}]}) + else: + 
import cymysql + if configloader.get_config().MYSQL_SSL_ENABLE == 1: + conn = cymysql.connect( + host=configloader.get_config().MYSQL_HOST, + port=configloader.get_config().MYSQL_PORT, + user=configloader.get_config().MYSQL_USER, + passwd=configloader.get_config().MYSQL_PASS, + db=configloader.get_config().MYSQL_DB, + charset='utf8', + ssl={ + 'ca': configloader.get_config().MYSQL_SSL_CA, + 'cert': configloader.get_config().MYSQL_SSL_CERT, + 'key': configloader.get_config().MYSQL_SSL_KEY}) + else: + conn = cymysql.connect( + host=configloader.get_config().MYSQL_HOST, + port=configloader.get_config().MYSQL_PORT, + user=configloader.get_config().MYSQL_USER, + passwd=configloader.get_config().MYSQL_PASS, + db=configloader.get_config().MYSQL_DB, + charset='utf8') + conn.autocommit(True) + cur = conn.cursor() + cur.execute( + "INSERT INTO `speedtest` (`id`, `nodeid`, `datetime`, `telecomping`, `telecomeupload`, `telecomedownload`, `unicomping`, `unicomupload`, `unicomdownload`, `cmccping`, `cmccupload`, `cmccdownload`) VALUES (NULL, '" + + str( + configloader.get_config().NODE_ID) + + "', unix_timestamp(), '" + + CTPing + + "', '" + + CTUpSpeed + + "', '" + + CTDLSpeed + + "', '" + + CUPing + + "', '" + + CUUpSpeed + + "', '" + + CUDLSpeed + + "', '" + + CMPing + + "', '" + + CMUpSpeed + + "', '" + + CMDLSpeed + + "')") + cur.close() + conn.close() + + logging.info("Speedtest finished") + + @staticmethod + def thread_db(obj): + + if configloader.get_config().SPEEDTEST == 0: + return + + if configloader.get_config().API_INTERFACE == 'modwebapi': + import webapi_utils + + global webapi + webapi = webapi_utils.WebApi() + + global db_instance + db_instance = obj() + + try: + while True: + try: + db_instance.speedtest_thread() + except Exception as e: + import traceback + trace = traceback.format_exc() + logging.error(trace) + #logging.warn('db thread except:%s' % e) + if db_instance.event.wait(configloader.get_config().SPEEDTEST * 3600): + break + if 
db_instance.has_stopped: + break + except KeyboardInterrupt as e: + pass + db_instance = None + + @staticmethod + def thread_db_stop(): + global db_instance + db_instance.has_stopped = True + db_instance.event.set() diff --git a/stop.sh b/stop.sh new file mode 100644 index 0000000..7641f5e --- /dev/null +++ b/stop.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +eval $(ps -ef | grep "[0-9] python server\\.py m" | awk '{print "kill -9 "$2}') diff --git a/switchrule.py b/switchrule.py new file mode 100644 index 0000000..8f3d59b --- /dev/null +++ b/switchrule.py @@ -0,0 +1,28 @@ +from configloader import load_config, get_config + + +def getKeys(): + key_list = ['id', 'port', 'u', 'd', 'transfer_enable', 'passwd', 'enable'] + if get_config().API_INTERFACE == 'sspanelv3': + key_list += ['method'] + elif get_config().API_INTERFACE == 'sspanelv3ssr': + key_list += ['method', 'obfs', 'protocol'] + elif get_config().API_INTERFACE == 'glzjinmod': + key_list += ['method', + 'obfs', + 'obfs_param', + 'protocol', + 'protocol_param', + 'id', + 'node_speedlimit', + 'forbidden_ip', + 'forbidden_port', + 'disconnect_ip', + 'is_multi_user'] + return key_list + # return key_list + ['plan'] # append the column name 'plan' + + +def isTurnOn(row): + return True + # return row['plan'] == 'B' # then judge here diff --git a/tail.sh b/tail.sh new file mode 100644 index 0000000..f36f605 --- /dev/null +++ b/tail.sh @@ -0,0 +1,3 @@ +#!/bin/bash +cd `dirname $0` +tail -f ssserver.log diff --git a/tests/aes-cfb1.json b/tests/aes-cfb1.json new file mode 100644 index 0000000..70ae670 --- /dev/null +++ b/tests/aes-cfb1.json @@ -0,0 +1,10 @@ +{ + "server":"127.0.0.1", + "server_port":8388, + "local_port":1081, + "password":"aes_password", + "timeout":60, + "method":"aes-256-cfb1", + "local_address":"127.0.0.1", + "fast_open":false +} diff --git a/tests/aes-cfb8.json b/tests/aes-cfb8.json new file mode 100644 index 0000000..fe8b715 --- /dev/null +++ b/tests/aes-cfb8.json @@ -0,0 +1,10 @@ +{ + "server":"127.0.0.1", 
+ "server_port":8388, + "local_port":1081, + "password":"aes_password", + "timeout":60, + "method":"aes-256-cfb8", + "local_address":"127.0.0.1", + "fast_open":false +} diff --git a/tests/aes-ctr.json b/tests/aes-ctr.json new file mode 100644 index 0000000..1f5a1d7 --- /dev/null +++ b/tests/aes-ctr.json @@ -0,0 +1,10 @@ +{ + "server":"127.0.0.1", + "server_port":8388, + "local_port":1081, + "password":"aes_password", + "timeout":60, + "method":"aes-256-ctr", + "local_address":"127.0.0.1", + "fast_open":false +} diff --git a/tests/aes.json b/tests/aes.json new file mode 100644 index 0000000..2fc29f3 --- /dev/null +++ b/tests/aes.json @@ -0,0 +1,10 @@ +{ + "server":"127.0.0.1", + "server_port":8388, + "local_port":1081, + "password":"aes_password", + "timeout":60, + "method":"aes-256-cfb", + "local_address":"127.0.0.1", + "fast_open":false +} diff --git a/tests/assert.sh b/tests/assert.sh new file mode 100644 index 0000000..b0c679c --- /dev/null +++ b/tests/assert.sh @@ -0,0 +1,148 @@ +#!/bin/bash +# assert.sh 1.0 - bash unit testing framework +# Copyright (C) 2009, 2010, 2011, 2012 Robert Lehmann +# +# http://github.com/lehmannro/assert.sh +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program. If not, see . 
+ +export DISCOVERONLY=${DISCOVERONLY:-} +export DEBUG=${DEBUG:-} +export STOP=${STOP:-} +export INVARIANT=${INVARIANT:-} +export CONTINUE=${CONTINUE:-} + +args="$(getopt -n "$0" -l \ + verbose,help,stop,discover,invariant,continue vhxdic $*)" \ +|| exit -1 +for arg in $args; do + case "$arg" in + -h) + echo "$0 [-vxidc]" \ + "[--verbose] [--stop] [--invariant] [--discover] [--continue]" + echo "`sed 's/./ /g' <<< "$0"` [-h] [--help]" + exit 0;; + --help) + cat < [stdin] + (( tests_ran++ )) || : + [[ -n "$DISCOVERONLY" ]] && return || true + # printf required for formatting + printf -v expected "x${2:-}" # x required to overwrite older results + result="$(eval 2>/dev/null $1 <<< ${3:-})" || true + # Note: $expected is already decorated + if [[ "x$result" == "$expected" ]]; then + [[ -n "$DEBUG" ]] && echo -n . || true + return + fi + result="$(sed -e :a -e '$!N;s/\n/\\n/;ta' <<< "$result")" + [[ -z "$result" ]] && result="nothing" || result="\"$result\"" + [[ -z "$2" ]] && expected="nothing" || expected="\"$2\"" + _assert_fail "expected $expected${_indent}got $result" "$1" "$3" +} + +assert_raises() { + # assert_raises [stdin] + (( tests_ran++ )) || : + [[ -n "$DISCOVERONLY" ]] && return || true + status=0 + (eval $1 <<< ${3:-}) > /dev/null 2>&1 || status=$? + expected=${2:-0} + if [[ "$status" -eq "$expected" ]]; then + [[ -n "$DEBUG" ]] && echo -n . || true + return + fi + _assert_fail "program terminated with code $status instead of $expected" "$1" "$3" +} + +_assert_fail() { + # _assert_fail + [[ -n "$DEBUG" ]] && echo -n X + report="test #$tests_ran \"$2${3:+ <<< $3}\" failed:${_indent}$1" + if [[ -n "$STOP" ]]; then + [[ -n "$DEBUG" ]] && echo + echo "$report" + exit 1 + fi + tests_errors[$tests_failed]="$report" + (( tests_failed++ )) || : +} + +_assert_reset +: ${tests_suite_status:=0} # remember if any of the tests failed so far +_assert_cleanup() { + local status=$? 
+ # modify exit code if it's not already non-zero + [[ $status -eq 0 && -z $CONTINUE ]] && exit $tests_suite_status +} +trap _assert_cleanup EXIT diff --git a/tests/chacha20.json b/tests/chacha20.json new file mode 100644 index 0000000..fc2af8e --- /dev/null +++ b/tests/chacha20.json @@ -0,0 +1,10 @@ +{ + "server":"127.0.0.1", + "server_port":8388, + "local_port":1081, + "password":"salsa20_password", + "timeout":60, + "method":"chacha20", + "local_address":"127.0.0.1", + "fast_open":false +} diff --git a/tests/client-multi-server-ip.json b/tests/client-multi-server-ip.json new file mode 100644 index 0000000..3050c11 --- /dev/null +++ b/tests/client-multi-server-ip.json @@ -0,0 +1,10 @@ +{ + "server":["127.0.0.1", "127.0.0.1"], + "server_port":8388, + "local_port":1081, + "password":"aes_password", + "timeout":60, + "method":"aes-256-cfb", + "local_address":"127.0.0.1", + "fast_open":false +} diff --git a/tests/coverage_server.py b/tests/coverage_server.py new file mode 100644 index 0000000..23cc8cd --- /dev/null +++ b/tests/coverage_server.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python +# +# Copyright 2015 clowwindy +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +if __name__ == '__main__': + import tornado.ioloop + import tornado.web + import urllib + + class MainHandler(tornado.web.RequestHandler): + def get(self, project): + try: + with open('/tmp/%s-coverage' % project, 'rb') as f: + coverage = f.read().strip() + n = int(coverage.strip('%')) + if n >= 80: + color = 'brightgreen' + else: + color = 'yellow' + self.redirect(('https://img.shields.io/badge/' + 'coverage-%s-%s.svg' + '?style=flat') % + (urllib.quote(coverage), color)) + except IOError: + raise tornado.web.HTTPError(404) + + application = tornado.web.Application([ + (r"/([a-zA-Z0-9\-_]+)", MainHandler), + ]) + + if __name__ == "__main__": + application.listen(8888, address='127.0.0.1') + tornado.ioloop.IOLoop.instance().start() diff --git a/tests/fastopen.json b/tests/fastopen.json new file mode 100644 index 0000000..b4f60d4 --- /dev/null +++ b/tests/fastopen.json @@ -0,0 +1,10 @@ +{ + "server":"127.0.0.1", + "server_port":8388, + "local_port":1081, + "password":"fastopen_password", + "timeout":60, + "method":"aes-256-cfb", + "local_address":"127.0.0.1", + "fast_open":true +} diff --git a/tests/ipv6-client-side.json b/tests/ipv6-client-side.json new file mode 100644 index 0000000..e14bb07 --- /dev/null +++ b/tests/ipv6-client-side.json @@ -0,0 +1,10 @@ +{ + "server":"::1", + "server_port":8388, + "local_port":1081, + "password":"aes_password", + "timeout":60, + "method":"aes-256-cfb", + "local_address":"127.0.0.1", + "fast_open":false +} diff --git a/tests/ipv6.json b/tests/ipv6.json new file mode 100644 index 0000000..18263ee --- /dev/null +++ b/tests/ipv6.json @@ -0,0 +1,10 @@ +{ + "server":"::", + "server_port":8388, + "local_port":1081, + "password":"aes_password", + "timeout":60, + "method":"aes-256-cfb", + "local_address":"127.0.0.1", + "fast_open":false +} diff --git a/tests/jenkins.sh b/tests/jenkins.sh new file mode 100644 index 0000000..ea5c163 --- /dev/null +++ b/tests/jenkins.sh @@ -0,0 +1,82 @@ +#!/bin/bash + +result=0 + +function run_test { + 
printf '\e[0;36m' + echo "running test: $command $@" + printf '\e[0m' + + $command "$@" + status=$? + if [ $status -ne 0 ]; then + printf '\e[0;31m' + echo "test failed: $command $@" + printf '\e[0m' + echo + result=1 + else + printf '\e[0;32m' + echo OK + printf '\e[0m' + echo + fi + return 0 +} + +python --version +coverage erase +mkdir tmp +run_test pep8 --ignore=E402 . +run_test pyflakes . +run_test coverage run tests/nose_plugin.py -v +run_test python setup.py sdist +run_test tests/test_daemon.sh +run_test python tests/test.py --with-coverage -c tests/aes.json +run_test python tests/test.py --with-coverage -c tests/aes-ctr.json +run_test python tests/test.py --with-coverage -c tests/aes-cfb1.json +run_test python tests/test.py --with-coverage -c tests/aes-cfb8.json +run_test python tests/test.py --with-coverage -c tests/rc4-md5.json +run_test python tests/test.py --with-coverage -c tests/salsa20.json +run_test python tests/test.py --with-coverage -c tests/chacha20.json +run_test python tests/test.py --with-coverage -c tests/table.json +run_test python tests/test.py --with-coverage -c tests/server-multi-ports.json +run_test python tests/test.py --with-coverage -s tests/aes.json -c tests/client-multi-server-ip.json +run_test python tests/test.py --with-coverage -s tests/server-multi-passwd.json -c tests/server-multi-passwd-client-side.json +run_test python tests/test.py --with-coverage -c tests/workers.json +run_test python tests/test.py --with-coverage -s tests/ipv6.json -c tests/ipv6-client-side.json +run_test python tests/test.py --with-coverage -b "-m rc4-md5 -k testrc4 -s 127.0.0.1 -p 8388 -q" -a "-m rc4-md5 -k testrc4 -s 127.0.0.1 -p 8388 -l 1081 -vv" +run_test python tests/test.py --with-coverage -b "-m aes-256-cfb -k testrc4 -s 127.0.0.1 -p 8388 --workers 1" -a "-m aes-256-cfb -k testrc4 -s 127.0.0.1 -p 8388 -l 1081 -t 30 -qq -b 127.0.0.1" +run_test python tests/test.py --with-coverage --should-fail --url="http://127.0.0.1/" -b "-m aes-256-cfb -k testrc4 
-s 127.0.0.1 -p 8388 --forbidden-ip=127.0.0.1,::1,8.8.8.8" -a "-m aes-256-cfb -k testrc4 -s 127.0.0.1 -p 8388 -l 1081 -t 30 -b 127.0.0.1" + +# test if DNS works +run_test python tests/test.py --with-coverage -c tests/aes.json --url="https://clients1.google.com/generate_204" + +# test localhost is in the forbidden list by default +run_test python tests/test.py --with-coverage --should-fail --tcp-only --url="http://127.0.0.1/" -b "-m aes-256-cfb -k testrc4 -s 127.0.0.1 -p 8388" -a "-m aes-256-cfb -k testrc4 -s 127.0.0.1 -p 8388 -l 1081 -t 30 -b 127.0.0.1" + +# test localhost is available when forbidden list is empty +run_test python tests/test.py --with-coverage --tcp-only --url="http://127.0.0.1/" -b "-m aes-256-cfb -k testrc4 -s 127.0.0.1 -p 8388 --forbidden-ip=" -a "-m aes-256-cfb -k testrc4 -s 127.0.0.1 -p 8388 -l 1081 -t 30 -b 127.0.0.1" + +if [ -f /proc/sys/net/ipv4/tcp_fastopen ] ; then + if [ 3 -eq `cat /proc/sys/net/ipv4/tcp_fastopen` ] ; then + # we have to run it twice: + # the first time there's no syn cookie + # the second time there is syn cookie + run_test python tests/test.py --with-coverage -c tests/fastopen.json + run_test python tests/test.py --with-coverage -c tests/fastopen.json + fi +fi + +run_test tests/test_large_file.sh +run_test tests/test_udp_src.sh +run_test tests/test_command.sh + +coverage combine && coverage report --include=shadowsocks/* +rm -rf htmlcov +rm -rf tmp +coverage html --include=shadowsocks/* + +coverage report --include=shadowsocks/* | tail -n1 | rev | cut -d' ' -f 1 | rev > /tmp/shadowsocks-coverage + +exit $result diff --git a/tests/libsodium/install.sh b/tests/libsodium/install.sh new file mode 100644 index 0000000..b0e35fa --- /dev/null +++ b/tests/libsodium/install.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ ! 
-d libsodium-1.0.1 ]; then + wget https://github.com/jedisct1/libsodium/releases/download/1.0.1/libsodium-1.0.1.tar.gz || exit 1 + tar xf libsodium-1.0.1.tar.gz || exit 1 +fi +pushd libsodium-1.0.1 +./configure && make -j2 && make install || exit 1 +sudo ldconfig +popd diff --git a/tests/nose_plugin.py b/tests/nose_plugin.py new file mode 100644 index 0000000..86b1a86 --- /dev/null +++ b/tests/nose_plugin.py @@ -0,0 +1,43 @@ +#!/usr/bin/env python +# +# Copyright 2015 clowwindy +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import nose +from nose.plugins.base import Plugin + + +class ExtensionPlugin(Plugin): + + name = "ExtensionPlugin" + + def options(self, parser, env): + Plugin.options(self, parser, env) + + def configure(self, options, config): + Plugin.configure(self, options, config) + self.enabled = True + + def wantFile(self, file): + return file.endswith('.py') + + def wantDirectory(self, directory): + return True + + def wantModule(self, file): + return True + + +if __name__ == '__main__': + nose.main(addplugins=[ExtensionPlugin()]) diff --git a/tests/rc4-md5.json b/tests/rc4-md5.json new file mode 100644 index 0000000..e69b433 --- /dev/null +++ b/tests/rc4-md5.json @@ -0,0 +1,10 @@ +{ + "server":"127.0.0.1", + "server_port":8388, + "local_port":1081, + "password":"aes_password", + "timeout":60, + "method":"rc4-md5", + "local_address":"127.0.0.1", + "fast_open":false +} diff --git a/tests/salsa20-ctr.json b/tests/salsa20-ctr.json new file mode 100644 index 0000000..8b77d07 --- /dev/null +++ b/tests/salsa20-ctr.json @@ -0,0 +1,10 @@ +{ + "server":"127.0.0.1", + "server_port":8388, + "local_port":1081, + "password":"salsa20_password", + "timeout":60, + "method":"salsa20-ctr", + "local_address":"127.0.0.1", + "fast_open":false +} diff --git a/tests/salsa20.json b/tests/salsa20.json new file mode 100644 index 0000000..a4a664f --- /dev/null +++ b/tests/salsa20.json @@ -0,0 +1,10 @@ +{ + "server":"127.0.0.1", + "server_port":8388, + "local_port":1081, + "password":"salsa20_password", + "timeout":60, + "method":"salsa20", + "local_address":"127.0.0.1", + "fast_open":false +} diff --git a/tests/server-multi-passwd-client-side.json b/tests/server-multi-passwd-client-side.json new file mode 100644 index 0000000..c822c98 --- /dev/null +++ b/tests/server-multi-passwd-client-side.json @@ -0,0 +1,8 @@ +{ + "server": "127.0.0.1", + "server_port": "8385", + "local_port": 1081, + "password": "foobar5", + "timeout": 60, + "method": "aes-256-cfb" +} diff --git 
a/tests/server-multi-passwd-table.json b/tests/server-multi-passwd-table.json new file mode 100644 index 0000000..a2c0a80 --- /dev/null +++ b/tests/server-multi-passwd-table.json @@ -0,0 +1,19 @@ +{ + "server": "127.0.0.1", + "server_port": 8384, + "local_port": 1081, + "password": "foobar4", + "port_password": { + "8381": "foobar1", + "8382": "foobar2", + "8383": "foobar3", + "8384": "foobar4", + "8385": "foobar5", + "8386": "foobar6", + "8387": "foobar7", + "8388": "foobar8", + "8389": "foobar9" + }, + "timeout": 60, + "method": "table" +} diff --git a/tests/server-multi-passwd.json b/tests/server-multi-passwd.json new file mode 100644 index 0000000..b1407f0 --- /dev/null +++ b/tests/server-multi-passwd.json @@ -0,0 +1,17 @@ +{ + "server": "127.0.0.1", + "local_port": 1081, + "port_password": { + "8381": "foobar1", + "8382": "foobar2", + "8383": "foobar3", + "8384": "foobar4", + "8385": "foobar5", + "8386": "foobar6", + "8387": "foobar7", + "8388": "foobar8", + "8389": "foobar9" + }, + "timeout": 60, + "method": "aes-256-cfb" +} diff --git a/tests/server-multi-ports.json b/tests/server-multi-ports.json new file mode 100644 index 0000000..5bdbcab --- /dev/null +++ b/tests/server-multi-ports.json @@ -0,0 +1,8 @@ +{ + "server": "127.0.0.1", + "server_port": [8384, 8345, 8346, 8347], + "local_port": 1081, + "password": "foobar4", + "timeout": 60, + "method": "aes-256-cfb" +} diff --git a/tests/setup_tc.sh b/tests/setup_tc.sh new file mode 100644 index 0000000..1a5fa20 --- /dev/null +++ b/tests/setup_tc.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +DEV=lo +PORT=8388 +DELAY=100ms + +type tc 2> /dev/null && ( + tc qdisc add dev $DEV root handle 1: htb + tc class add dev $DEV parent 1: classid 1:1 htb rate 2mbps + tc class add dev $DEV parent 1:1 classid 1:6 htb rate 2mbps ceil 1mbps prio 0 + tc filter add dev $DEV parent 1:0 prio 0 protocol ip handle 6 fw flowid 1:6 + + tc filter add dev $DEV parent 1:0 protocol ip u32 match ip dport $PORT 0xffff flowid 1:6 + tc filter add dev 
$DEV parent 1:0 protocol ip u32 match ip sport $PORT 0xffff flowid 1:6 + + tc qdisc show dev lo +) + diff --git a/tests/socksify/install.sh b/tests/socksify/install.sh new file mode 100644 index 0000000..8eff72d --- /dev/null +++ b/tests/socksify/install.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if [ ! -d dante-1.4.0 ]; then + wget http://www.inet.no/dante/files/dante-1.4.0.tar.gz || exit 1 + tar xf dante-1.4.0.tar.gz || exit 1 +fi +pushd dante-1.4.0 +./configure && make -j4 && make install || exit 1 +popd +cp tests/socksify/socks.conf /etc/ || exit 1 diff --git a/tests/socksify/socks.conf b/tests/socksify/socks.conf new file mode 100644 index 0000000..13db772 --- /dev/null +++ b/tests/socksify/socks.conf @@ -0,0 +1,5 @@ +route { + from: 0.0.0.0/0 to: 0.0.0.0/0 via: 127.0.0.1 port = 1081 + proxyprotocol: socks_v5 + method: none +} \ No newline at end of file diff --git a/tests/table.json b/tests/table.json new file mode 100644 index 0000000..49c2c01 --- /dev/null +++ b/tests/table.json @@ -0,0 +1,10 @@ +{ + "server":"127.0.0.1", + "server_port":8388, + "local_port":1081, + "password":"table_password", + "timeout":60, + "method":"table", + "local_address":"127.0.0.1", + "fast_open":false +} diff --git a/tests/test.py b/tests/test.py new file mode 100644 index 0000000..4083401 --- /dev/null +++ b/tests/test.py @@ -0,0 +1,158 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# +# Copyright 2015 clowwindy +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from __future__ import absolute_import, division, print_function, \ + with_statement + +import sys +import os +import signal +import select +import time +import argparse +from subprocess import Popen, PIPE + +python = ['python'] + +default_url = 'http://localhost/' + +parser = argparse.ArgumentParser(description='test Shadowsocks') +parser.add_argument('-c', '--client-conf', type=str, default=None) +parser.add_argument('-s', '--server-conf', type=str, default=None) +parser.add_argument('-a', '--client-args', type=str, default=None) +parser.add_argument('-b', '--server-args', type=str, default=None) +parser.add_argument('--with-coverage', action='store_true', default=None) +parser.add_argument('--should-fail', action='store_true', default=None) +parser.add_argument('--tcp-only', action='store_true', default=None) +parser.add_argument('--url', type=str, default=default_url) +parser.add_argument('--dns', type=str, default='8.8.8.8') + +config = parser.parse_args() + +if config.with_coverage: + python = ['coverage', 'run', '-p'] + +client_args = python + ['shadowsocks/local.py', '-v'] +server_args = python + ['shadowsocks/server.py', '-v'] + +if config.client_conf: + client_args.extend(['-c', config.client_conf]) + if config.server_conf: + server_args.extend(['-c', config.server_conf]) + else: + server_args.extend(['-c', config.client_conf]) +if config.client_args: + client_args.extend(config.client_args.split()) + if config.server_args: + server_args.extend(config.server_args.split()) + else: + server_args.extend(config.client_args.split()) +if config.url == default_url: + server_args.extend(['--forbidden-ip', '']) + +p1 = Popen(server_args, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True) +p2 = Popen(client_args, stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True) +p3 = None +p4 = None +p3_fin = False +p4_fin = False + +# 1 shadowsocks started +# 2 curl started +# 3 curl finished +# 4 dig started +# 5 dig finished +stage = 1 + +try: + local_ready = False + 
server_ready = False + fdset = [p1.stdout, p2.stdout, p1.stderr, p2.stderr] + while True: + r, w, e = select.select(fdset, [], fdset) + if e: + break + + for fd in r: + line = fd.readline() + if not line: + if stage == 2 and fd == p3.stdout: + stage = 3 + if stage == 4 and fd == p4.stdout: + stage = 5 + if bytes != str: + line = str(line, 'utf8') + sys.stderr.write(line) + if line.find('starting local') >= 0: + local_ready = True + if line.find('starting server') >= 0: + server_ready = True + + if stage == 1: + time.sleep(2) + + p3 = Popen(['curl', config.url, '-v', '-L', + '--socks5-hostname', '127.0.0.1:1081', + '-m', '15', '--connect-timeout', '10'], + stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True) + if p3 is not None: + fdset.append(p3.stdout) + fdset.append(p3.stderr) + stage = 2 + else: + sys.exit(1) + + if stage == 3 and p3 is not None: + fdset.remove(p3.stdout) + fdset.remove(p3.stderr) + r = p3.wait() + if config.should_fail: + if r == 0: + sys.exit(1) + else: + if r != 0: + sys.exit(1) + if config.tcp_only: + break + p4 = Popen(['socksify', 'dig', '@%s' % config.dns, + 'www.google.com'], + stdin=PIPE, stdout=PIPE, stderr=PIPE, close_fds=True) + if p4 is not None: + fdset.append(p4.stdout) + fdset.append(p4.stderr) + stage = 4 + else: + sys.exit(1) + + if stage == 5: + r = p4.wait() + if config.should_fail: + if r == 0: + sys.exit(1) + print('test passed (expecting failure)') + else: + if r != 0: + sys.exit(1) + print('test passed') + break +finally: + for p in [p1, p2]: + try: + os.kill(p.pid, signal.SIGINT) + os.waitpid(p.pid, 0) + except OSError: + pass diff --git a/tests/test_command.sh b/tests/test_command.sh new file mode 100644 index 0000000..a1a777b --- /dev/null +++ b/tests/test_command.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +. 
tests/assert.sh + +PYTHON="coverage run -p" +LOCAL="$PYTHON shadowsocks/local.py" +SERVER="$PYTHON shadowsocks/server.py" + +assert "$LOCAL --version 2>&1 | grep Shadowsocks | awk -F\" \" '{print \$1}'" "Shadowsocks" +assert "$SERVER --version 2>&1 | grep Shadowsocks | awk -F\" \" '{print \$1}'" "Shadowsocks" + + +assert "$LOCAL 2>&1 -m rc4-md5 -k testrc4 -s 127.0.0.1 -p 8388 -d start | grep WARNING | awk -F\"WARNING\" '{print \$2}'" " warning: server set to listen on 127.0.0.1:8388, are you sure?" +$LOCAL 2>/dev/null 1>/dev/null -m rc4-md5 -k testrc4 -s 127.0.0.1 -p 8388 -d stop + +assert "$LOCAL 2>&1 -m rc4-md5 -k testrc4 -s 0.0.0.0 -p 8388 -t10 -d start | grep WARNING | awk -F\"WARNING\" '{print \$2}'" " warning: your timeout 10 seems too short" +$LOCAL 2>/dev/null 1>/dev/null -m rc4-md5 -k testrc4 -s 127.0.0.1 -p 8388 -d stop + +assert "$LOCAL 2>&1 -m rc4-md5 -k testrc4 -s 0.0.0.0 -p 8388 -t1000 -d start | grep WARNING | awk -F\"WARNING\" '{print \$2}'" " warning: your timeout 1000 seems too long" +$LOCAL 2>/dev/null 1>/dev/null -m rc4-md5 -k testrc4 -s 127.0.0.1 -p 8388 -d stop + +assert "$LOCAL 2>&1 -m rc4 -k testrc4 -s 0.0.0.0 -p 8388 -d start | grep WARNING | awk -F\"WARNING\" '{print \$2}'" " warning: RC4 is not safe; please use a safer cipher, like AES-256-CFB" +$LOCAL 2>/dev/null 1>/dev/null -m rc4-md5 -k testrc4 -s 127.0.0.1 -p 8388 -d stop + +assert "$LOCAL 2>&1 -m rc4-md5 -k mypassword -s 0.0.0.0 -p 8388 -d start | grep ERROR | awk -F\"ERROR\" '{print \$2}'" " DON'T USE DEFAULT PASSWORD! Please change it in your config.json!" 
+$LOCAL 2>/dev/null 1>/dev/null -m rc4-md5 -k testrc4 -s 127.0.0.1 -p 8388 -d stop + + +assert "$SERVER 2>&1 --forbidden-ip 127.0.0.1/4a -m rc4-md5 -k 12345 -p 8388 -s 0.0.0.0 -d start | grep ERROR | awk -F\"ERROR\" '{print \$2}'" ": Not a valid CIDR notation: 127.0.0.1/4a" +$LOCAL 2>/dev/null 1>/dev/null -m rc4-md5 -k testrc4 -s 127.0.0.1 -p 8388 -d stop + +assert_end command diff --git a/tests/test_daemon.sh b/tests/test_daemon.sh new file mode 100644 index 0000000..7a192bd --- /dev/null +++ b/tests/test_daemon.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +function run_test { + expected=$1 + shift + echo "running test: $command $@" + $command $@ + status=$? + if [ $status -ne $expected ]; then + echo "exit $status != $expected" + exit 1 + fi + echo "exit status $status == $expected" + echo OK + return +} + +for module in local server +do + +command="coverage run -p shadowsocks/$module.py" + +mkdir -p tmp + +run_test 0 -c tests/aes.json -d stop --pid-file tmp/shadowsocks.pid --log-file tmp/shadowsocks.log + +run_test 0 -c tests/aes.json -d start --pid-file tmp/shadowsocks.pid --log-file tmp/shadowsocks.log +run_test 0 -c tests/aes.json -d stop --pid-file tmp/shadowsocks.pid --log-file tmp/shadowsocks.log + +run_test 0 -c tests/aes.json -d start --pid-file tmp/shadowsocks.pid --log-file tmp/shadowsocks.log +run_test 1 -c tests/aes.json -d start --pid-file tmp/shadowsocks.pid --log-file tmp/shadowsocks.log +run_test 0 -c tests/aes.json -d stop --pid-file tmp/shadowsocks.pid --log-file tmp/shadowsocks.log + +run_test 0 -c tests/aes.json -d start --pid-file tmp/shadowsocks.pid --log-file tmp/shadowsocks.log +run_test 0 -c tests/aes.json -d restart --pid-file tmp/shadowsocks.pid --log-file tmp/shadowsocks.log +run_test 0 -c tests/aes.json -d stop --pid-file tmp/shadowsocks.pid --log-file tmp/shadowsocks.log + +run_test 0 -c tests/aes.json -d restart --pid-file tmp/shadowsocks.pid --log-file tmp/shadowsocks.log +run_test 0 -c tests/aes.json -d stop --pid-file tmp/shadowsocks.pid 
--log-file tmp/shadowsocks.log + +run_test 1 -c tests/aes.json -d start --pid-file tmp/not_exist/shadowsocks.pid --log-file tmp/shadowsocks.log + +done diff --git a/tests/test_large_file.sh b/tests/test_large_file.sh new file mode 100644 index 0000000..7a61caf --- /dev/null +++ b/tests/test_large_file.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +PYTHON="coverage run -p" +URL=http://127.0.0.1/file + +mkdir -p tmp + +$PYTHON shadowsocks/local.py -c tests/aes.json & +LOCAL=$! + +$PYTHON shadowsocks/server.py -c tests/aes.json --forbidden-ip "" & +SERVER=$! + +sleep 3 + +time curl -o tmp/expected $URL +time curl -o tmp/result --socks5-hostname 127.0.0.1:1081 $URL + +kill -s SIGINT $LOCAL +kill -s SIGINT $SERVER + +sleep 2 + +diff tmp/expected tmp/result || exit 1 diff --git a/tests/test_udp_src.py b/tests/test_udp_src.py new file mode 100644 index 0000000..e8fa505 --- /dev/null +++ b/tests/test_udp_src.py @@ -0,0 +1,83 @@ +#!/usr/bin/python + +import socket +import socks + + +SERVER_IP = '127.0.0.1' +SERVER_PORT = 1081 + + +if __name__ == '__main__': + # Test 1: same source port IPv4 + sock_out = socks.socksocket(socket.AF_INET, socket.SOCK_DGRAM, + socket.SOL_UDP) + sock_out.set_proxy(socks.SOCKS5, SERVER_IP, SERVER_PORT) + sock_out.bind(('127.0.0.1', 9000)) + + sock_in1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, + socket.SOL_UDP) + sock_in2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, + socket.SOL_UDP) + + sock_in1.bind(('127.0.0.1', 9001)) + sock_in2.bind(('127.0.0.1', 9002)) + + sock_out.sendto(b'data', ('127.0.0.1', 9001)) + result1 = sock_in1.recvfrom(8) + + sock_out.sendto(b'data', ('127.0.0.1', 9002)) + result2 = sock_in2.recvfrom(8) + + sock_out.close() + sock_in1.close() + sock_in2.close() + + # make sure they're from the same source port + assert result1 == result2 + + # Test 2: same source port IPv6 + # try again from the same port but IPv6 + sock_out = socks.socksocket(socket.AF_INET, socket.SOCK_DGRAM, + socket.SOL_UDP) + 
sock_out.set_proxy(socks.SOCKS5, SERVER_IP, SERVER_PORT) + sock_out.bind(('127.0.0.1', 9000)) + + sock_in1 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM, + socket.SOL_UDP) + sock_in2 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM, + socket.SOL_UDP) + + sock_in1.bind(('::1', 9001)) + sock_in2.bind(('::1', 9002)) + + sock_out.sendto(b'data', ('::1', 9001)) + result1 = sock_in1.recvfrom(8) + + sock_out.sendto(b'data', ('::1', 9002)) + result2 = sock_in2.recvfrom(8) + + sock_out.close() + sock_in1.close() + sock_in2.close() + + # make sure they're from the same source port + assert result1 == result2 + + # Test 3: different source ports IPv6 + sock_out = socks.socksocket(socket.AF_INET, socket.SOCK_DGRAM, + socket.SOL_UDP) + sock_out.set_proxy(socks.SOCKS5, SERVER_IP, SERVER_PORT) + sock_out.bind(('127.0.0.1', 9003)) + + sock_in1 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM, + socket.SOL_UDP) + sock_in1.bind(('::1', 9001)) + sock_out.sendto(b'data', ('::1', 9001)) + result3 = sock_in1.recvfrom(8) + + # make sure they're from different source ports + assert result1 != result3 + + sock_out.close() + sock_in1.close() diff --git a/tests/test_udp_src.sh b/tests/test_udp_src.sh new file mode 100644 index 0000000..6a778ab --- /dev/null +++ b/tests/test_udp_src.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +PYTHON="coverage run -p" + +mkdir -p tmp + +$PYTHON shadowsocks/local.py -c tests/aes.json -v & +LOCAL=$! + +$PYTHON shadowsocks/server.py -c tests/aes.json --forbidden-ip "" -v & +SERVER=$! + +sleep 3 + +python tests/test_udp_src.py +r=$? 
+ +kill -s SIGINT $LOCAL +kill -s SIGINT $SERVER + +sleep 2 + +exit $r diff --git a/tests/workers.json b/tests/workers.json new file mode 100644 index 0000000..8e0943b --- /dev/null +++ b/tests/workers.json @@ -0,0 +1,10 @@ +{ + "server":"127.0.0.1", + "server_port":8388, + "local_port":1081, + "password":"workers_password", + "timeout":60, + "method":"aes-256-cfb", + "local_address":"127.0.0.1", + "workers": 4 +} diff --git a/utils/README.md b/utils/README.md new file mode 100644 index 0000000..f624309 --- /dev/null +++ b/utils/README.md @@ -0,0 +1,9 @@ +Useful Tools +=========== + +autoban.py +---------- + +Automatically ban IPs that try to brute force crack the server. + +See https://github.com/shadowsocks/shadowsocks/wiki/Ban-Brute-Force-Crackers diff --git a/utils/autoban.py b/utils/autoban.py new file mode 100644 index 0000000..1bbb65c --- /dev/null +++ b/utils/autoban.py @@ -0,0 +1,53 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# Copyright (c) 2015 clowwindy +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +from __future__ import absolute_import, division, print_function, \ + with_statement + +import os +import sys +import argparse + +if __name__ == '__main__': + parser = argparse.ArgumentParser(description='See README') + parser.add_argument('-c', '--count', default=3, type=int, + help='with how many failure times it should be ' + 'considered as an attack') + config = parser.parse_args() + ips = {} + banned = set() + for line in sys.stdin: + if 'can not parse header when' in line: + ip = line.split()[-1].split(':')[0] + if ip not in ips: + ips[ip] = 1 + print(ip) + sys.stdout.flush() + else: + ips[ip] += 1 + if ip not in banned and ips[ip] >= config.count: + banned.add(ip) + cmd = 'iptables -A INPUT -s %s -j DROP' % ip + print(cmd, file=sys.stderr) + sys.stderr.flush() + os.system(cmd) diff --git a/utils/fail2ban/shadowsocks.conf b/utils/fail2ban/shadowsocks.conf new file mode 100644 index 0000000..9b1c7ec --- /dev/null +++ b/utils/fail2ban/shadowsocks.conf @@ -0,0 +1,5 @@ +[Definition] + +_daemon = shadowsocks + +failregex = ^\s+ERROR\s+can not parse header when handling connection from :\d+$ diff --git a/web_transfer.py b/web_transfer.py new file mode 100644 index 0000000..eb88c95 --- /dev/null +++ b/web_transfer.py @@ -0,0 +1,693 @@ +#!/usr/bin/python +# -*- coding: UTF-8 -*- + +import logging +import time +import sys +import os +import socket +from server_pool import ServerPool +import traceback +from shadowsocks import common, shell, lru_cache +from configloader import load_config, get_config +import importloader +import platform +import datetime +import fcntl + + +switchrule = None +db_instance = None + + +class WebTransfer(object): + + def __init__(self): + import threading + 
self.last_update_transfer = {} + self.event = threading.Event() + self.port_uid_table = {} + self.uid_port_table = {} + self.node_speedlimit = 0.00 + self.traffic_rate = 0.0 + + self.detect_text_list = {} + + self.detect_hex_list = {} + + self.mu_only = False + self.is_relay = False + + self.relay_rule_list = {} + self.node_ip_list = [] + self.mu_port_list = [] + + self.has_stopped = False + + def update_all_user(self, dt_transfer): + global webapi + + update_transfer = {} + + alive_user_count = 0 + bandwidth_thistime = 0 + + data = [] + for id in dt_transfer.keys(): + if dt_transfer[id][0] == 0 and dt_transfer[id][1] == 0: + continue + data.append({'u': dt_transfer[id][0], 'd': dt_transfer[ + id][1], 'user_id': self.port_uid_table[id]}) + update_transfer[id] = dt_transfer[id] + webapi.postApi('users/traffic', + {'node_id': get_config().NODE_ID}, + {'data': data}) + + webapi.postApi( + 'nodes/%d/info' % + (get_config().NODE_ID), { + 'node_id': get_config().NODE_ID}, { + 'uptime': str( + self.uptime()), 'load': str( + self.load())}) + + online_iplist = ServerPool.get_instance().get_servers_iplist() + data = [] + for port in online_iplist.keys(): + for ip in online_iplist[port]: + data.append({'ip': ip, 'user_id': self.port_uid_table[port]}) + webapi.postApi('users/aliveip', + {'node_id': get_config().NODE_ID}, + {'data': data}) + + detect_log_list = ServerPool.get_instance().get_servers_detect_log() + data = [] + for port in detect_log_list.keys(): + for rule_id in detect_log_list[port]: + data.append({'list_id': rule_id, + 'user_id': self.port_uid_table[port]}) + webapi.postApi('users/detectlog', + {'node_id': get_config().NODE_ID}, + {'data': data}) + + deny_str = "" + data = [] + if platform.system() == 'Linux' and get_config().ANTISSATTACK == 1: + wrong_iplist = ServerPool.get_instance().get_servers_wrong() + server_ip = socket.gethostbyname(get_config().MYSQL_HOST) + for id in wrong_iplist.keys(): + for ip in wrong_iplist[id]: + realip = "" + is_ipv6 = False + 
if common.is_ip(ip): + if(common.is_ip(ip) == socket.AF_INET): + realip = ip + else: + if common.match_ipv4_address(ip) is not None: + realip = common.match_ipv4_address(ip) + else: + is_ipv6 = True + realip = ip + else: + continue + + if str(realip).find(str(server_ip)) != -1: + continue + + has_match_node = False + for node_ip in self.node_ip_list: + if str(realip).find(node_ip) != -1: + has_match_node = True + continue + + if has_match_node: + continue + + if get_config().CLOUDSAFE == 1: + data.append({'ip': realip}) + else: + if not is_ipv6: + os.system( + 'route add -host %s gw 127.0.0.1' % + str(realip)) + deny_str = deny_str + "\nALL: " + str(realip) + else: + os.system( + 'ip -6 route add ::1/128 via %s/128' % + str(realip)) + deny_str = deny_str + \ + "\nALL: [" + str(realip) + "]/128" + + logging.info("Local Block ip:" + str(realip)) + if get_config().CLOUDSAFE == 0: + deny_file = open('/etc/hosts.deny', 'a') + fcntl.flock(deny_file.fileno(), fcntl.LOCK_EX) + deny_file.write(deny_str) + deny_file.close() + webapi.postApi('func/block_ip', + {'node_id': get_config().NODE_ID}, + {'data': data}) + return update_transfer + + def uptime(self): + with open('/proc/uptime', 'r') as f: + return float(f.readline().split()[0]) + + def load(self): + import os + return os.popen( + "cat /proc/loadavg | awk '{ print $1\" \"$2\" \"$3 }'").readlines()[0] + + def trafficShow(self, Traffic): + if Traffic < 1024: + return str(round((Traffic), 2)) + "B" + + if Traffic < 1024 * 1024: + return str(round((Traffic / 1024), 2)) + "KB" + + if Traffic < 1024 * 1024 * 1024: + return str(round((Traffic / 1024 / 1024), 2)) + "MB" + + return str(round((Traffic / 1024 / 1024 / 1024), 2)) + "GB" + + def push_db_all_user(self): + # 更新用户流量到数据库 + last_transfer = self.last_update_transfer + curr_transfer = ServerPool.get_instance().get_servers_transfer() + # 上次和本次的增量 + dt_transfer = {} + for id in curr_transfer.keys(): + if id in last_transfer: + if curr_transfer[id][0] + curr_transfer[id][1] 
- \ + last_transfer[id][0] - last_transfer[id][1] <= 0: + continue + if last_transfer[id][0] <= curr_transfer[id][0] and \ + last_transfer[id][1] <= curr_transfer[id][1]: + dt_transfer[id] = [ + curr_transfer[id][0] - last_transfer[id][0], + curr_transfer[id][1] - last_transfer[id][1]] + else: + dt_transfer[id] = [ + curr_transfer[id][0], curr_transfer[id][1]] + else: + if curr_transfer[id][0] + curr_transfer[id][1] <= 0: + continue + dt_transfer[id] = [curr_transfer[id][0], curr_transfer[id][1]] + for id in dt_transfer.keys(): + last = last_transfer.get(id, [0, 0]) + last_transfer[id] = [ + last[0] + dt_transfer[id][0], + last[1] + dt_transfer[id][1]] + self.last_update_transfer = last_transfer.copy() + self.update_all_user(dt_transfer) + + def pull_db_all_user(self): + global webapi + + nodeinfo = webapi.getApi( + 'nodes/%d/info' % + (get_config().NODE_ID)) + + if not nodeinfo: + rows = [] + return rows + + self.node_speedlimit = nodeinfo['node_speedlimit'] + self.traffic_rate = nodeinfo['traffic_rate'] + + self.mu_only = nodeinfo['mu_only'] + + if nodeinfo['sort'] == 10: + self.is_relay = True + else: + self.is_relay = False + + data = webapi.getApi('users', {'node_id': get_config().NODE_ID}) + + if not data: + rows = [] + return rows + + rows = data + + # 读取节点IP + # SELECT * FROM `ss_node` where `node_ip` != '' + self.node_ip_list = [] + data = webapi.getApi('nodes') + for node in data: + temp_list = str(node['node_ip']).split(',') + self.node_ip_list.append(temp_list[0]) + + # 读取审计规则,数据包匹配部分 + + self.detect_text_list = {} + self.detect_hex_list = {} + data = webapi.getApi('func/detect_rules') + for rule in data: + d = {} + d['id'] = int(rule['id']) + d['regex'] = str(rule['regex']) + if int(rule['type']) == 1: + self.detect_text_list[d['id']] = d.copy() + else: + self.detect_hex_list[d['id']] = d.copy() + + # 读取中转规则,如果是中转节点的话 + + if self.is_relay: + self.relay_rule_list = {} + + data = webapi.getApi( + 'func/relay_rules', {'node_id': get_config().NODE_ID}) + 
# NOTE(review): reconstructed from a collapsed git patch; tokens are preserved,
# formatting and comments are restored/added.  The `else:` attachment in the
# node_speedlimit branch is inferred from semantics — confirm against upstream.

        # Tail of the relay-rule loading routine (its `def` lies above this
        # chunk): normalize each raw rule row into a plain dict of ints/str
        # keyed by rule id, stored in self.relay_rule_list.
        for rule in data:
            d = {}
            d['id'] = int(rule['id'])
            d['user_id'] = int(rule['user_id'])
            d['dist_ip'] = str(rule['dist_ip'])
            d['port'] = int(rule['port'])
            d['priority'] = int(rule['priority'])
            self.relay_rule_list[d['id']] = d.copy()

        return rows

    def cmp(self, val1, val2):
        """Equality compare two config values, decoding bytes to str first.

        Config values may arrive as bytes (encoded for the server config) or
        as str (fresh from the API), so both sides are normalized before
        comparing.  Returns True when the normalized values are equal.
        """
        if isinstance(val1, bytes):
            val1 = common.to_str(val1)
        if isinstance(val2, bytes):
            val2 = common.to_str(val2)
        return val1 == val2

    def del_server_out_of_bound_safe(self, last_rows, rows):
        """Reconcile running servers against the freshly pulled user rows.

        Stops servers that are over their traffic quota (absent from `rows`),
        starts servers that are not, and pushes updated detect/relay/user
        tables into already-running servers.

        :param last_rows: user rows from the previous poll cycle; any port
                          present there but missing from the current cycle's
                          live set is shut down.
        :param rows:      current user rows pulled from the web API.
        """
        # switchrule is (re)imported dynamically so rule edits take effect
        # without a restart; a broken switchrule.py is logged and ignored.
        try:
            switchrule = importloader.load('switchrule')
        except Exception as e:
            logging.error('load switchrule.py fail')
        cur_servers = {}
        new_servers = {}

        # md5_users: user id -> sanitized single-user row plus an 'md5' auth
        # token; this table is handed to multi-user ports so they can
        # authenticate individual users.
        md5_users = {}

        self.mu_port_list = []

        for row in rows:
            if row['is_multi_user'] != 0:
                # Multi-user ports are tracked separately; they do not get a
                # per-user md5 entry of their own.
                self.mu_port_list.append(int(row['port']))
                continue

            md5_users[row['id']] = row.copy()
            # Drop per-cycle traffic counters; they are not part of identity.
            del md5_users[row['id']]['u']
            del md5_users[row['id']]['d']
            if md5_users[row['id']]['disconnect_ip'] is None:
                md5_users[row['id']]['disconnect_ip'] = ''

            if md5_users[row['id']]['forbidden_ip'] is None:
                md5_users[row['id']]['forbidden_ip'] = ''

            if md5_users[row['id']]['forbidden_port'] is None:
                md5_users[row['id']]['forbidden_port'] = ''
            # Auth token binds id + password + cipher/protocol settings, so a
            # credential change invalidates the token.
            md5_users[row['id']]['md5'] = common.get_md5(
                str(row['id']) + row['passwd'] + row['method'] + row['obfs'] + row['protocol'])

        for row in rows:
            self.port_uid_table[row['port']] = row['id']
            self.uid_port_table[row['id']] = row['port']

        # mu_only == 1: serve only multi-user ports; mu_only == -1: serve only
        # single-user ports.  In-place pop with manual index rewind keeps the
        # scan correct while `rows` shrinks.
        if self.mu_only == 1:
            i = 0
            while i < len(rows):
                if rows[i]['is_multi_user'] == 0:
                    rows.pop(i)
                    i -= 1
                else:
                    pass
                i += 1

        if self.mu_only == -1:
            i = 0
            while i < len(rows):
                if rows[i]['is_multi_user'] != 0:
                    rows.pop(i)
                    i -= 1
                else:
                    pass
                i += 1

        for row in rows:
            port = row['port']
            user_id = row['id']
            passwd = common.to_bytes(row['passwd'])
            cfg = {'password': passwd}

            read_config_keys = [
                'method',
                'obfs',
                'obfs_param',
                'protocol',
                'protocol_param',
                'forbidden_ip',
                'forbidden_port',
                'node_speedlimit',
                'disconnect_ip',
                'is_multi_user']

            # Copy only truthy row values; falsy/missing keys fall back to
            # the defaults filled in below.
            for name in read_config_keys:
                if name in row and row[name]:
                    cfg[name] = row[name]

            merge_config_keys = ['password'] + read_config_keys
            # Server configs are byte-oriented; encode every str-like value.
            for name in cfg.keys():
                if hasattr(cfg[name], 'encode'):
                    try:
                        cfg[name] = cfg[name].encode('utf-8')
                    except Exception as e:
                        logging.warning(
                            'encode cfg key "%s" fail, val "%s"' % (name, cfg[name]))

            # Effective speed limit is the stricter (larger) of the node-wide
            # limit and the per-user limit; 0.0 means "unlimited".
            if 'node_speedlimit' in cfg:
                if float(
                        self.node_speedlimit) > 0.0 or float(
                        cfg['node_speedlimit']) > 0.0:
                    cfg['node_speedlimit'] = max(
                        float(
                            self.node_speedlimit), float(
                            cfg['node_speedlimit']))
            else:
                cfg['node_speedlimit'] = max(
                    float(self.node_speedlimit), float(0.00))

            if 'disconnect_ip' not in cfg:
                cfg['disconnect_ip'] = ''

            if 'forbidden_ip' not in cfg:
                cfg['forbidden_ip'] = ''

            if 'forbidden_port' not in cfg:
                cfg['forbidden_port'] = ''

            if 'protocol_param' not in cfg:
                cfg['protocol_param'] = ''

            if 'obfs_param' not in cfg:
                cfg['obfs_param'] = ''

            if 'is_multi_user' not in cfg:
                cfg['is_multi_user'] = 0

            # First row wins a port; duplicates are logged and skipped.
            if port not in cur_servers:
                cur_servers[port] = passwd
            else:
                logging.error(
                    'more than one user use the same port [%s]' %
                    (port,))
                continue

            if cfg['is_multi_user'] != 0:
                cfg['users_table'] = md5_users.copy()

            cfg['detect_hex_list'] = self.detect_hex_list.copy()
            cfg['detect_text_list'] = self.detect_text_list.copy()

            # Relay rule selection (is_multi_user == 2 presumably opts out of
            # relaying — confirm against panel semantics).  A rule applies to
            # this port when its user/port fields match or are 0 (wildcard)
            # AND no other applicable rule outranks it: higher priority wins;
            # on equal priority the lower rule id wins.
            if self.is_relay and row['is_multi_user'] != 2:
                temp_relay_rules = {}
                for id in self.relay_rule_list:
                    if ((self.relay_rule_list[id]['user_id'] == user_id or self.relay_rule_list[id]['user_id'] == 0) or row[
                            'is_multi_user'] != 0) and (self.relay_rule_list[id]['port'] == 0 or self.relay_rule_list[id]['port'] == port):
                        has_higher_priority = False
                        for priority_id in self.relay_rule_list:
                            if (
                                (
                                    self.relay_rule_list[priority_id]['priority'] > self.relay_rule_list[id]['priority'] and self.relay_rule_list[id]['id'] != self.relay_rule_list[priority_id]['id']) or (
                                    self.relay_rule_list[priority_id]['priority'] == self.relay_rule_list[id]['priority'] and self.relay_rule_list[id]['id'] > self.relay_rule_list[priority_id]['id'])) and (
                                    self.relay_rule_list[priority_id]['user_id'] == user_id or self.relay_rule_list[priority_id]['user_id'] == 0) and (
                                    self.relay_rule_list[priority_id]['port'] == port or self.relay_rule_list[priority_id]['port'] == 0):
                                has_higher_priority = True
                                continue

                        if has_higher_priority:
                            continue

                        # '0.0.0.0' marks a "don't relay" rule for
                        # single-user ports.
                        if self.relay_rule_list[id]['dist_ip'] == '0.0.0.0' and row['is_multi_user'] == 0:
                            continue

                        temp_relay_rules[id] = self.relay_rule_list[id]

                cfg['relay_rules'] = temp_relay_rules.copy()
            else:
                temp_relay_rules = {}

                cfg['relay_rules'] = temp_relay_rules.copy()

            if ServerPool.get_instance().server_is_run(port) > 0:
                # Server already running: push fresh detect lists into every
                # pool (tcp/udp, v4/v6) that hosts this port.
                cfgchange = False

                if port in ServerPool.get_instance().tcp_servers_pool:
                    ServerPool.get_instance().tcp_servers_pool[
                        port].modify_detect_text_list(self.detect_text_list)
                    ServerPool.get_instance().tcp_servers_pool[
                        port].modify_detect_hex_list(self.detect_hex_list)
                if port in ServerPool.get_instance().tcp_ipv6_servers_pool:
                    ServerPool.get_instance().tcp_ipv6_servers_pool[
                        port].modify_detect_text_list(self.detect_text_list)
                    ServerPool.get_instance().tcp_ipv6_servers_pool[
                        port].modify_detect_hex_list(self.detect_hex_list)
                if port in ServerPool.get_instance().udp_servers_pool:
                    ServerPool.get_instance().udp_servers_pool[
                        port].modify_detect_text_list(self.detect_text_list)
                    ServerPool.get_instance().udp_servers_pool[
                        port].modify_detect_hex_list(self.detect_hex_list)
                if port in ServerPool.get_instance().udp_ipv6_servers_pool:
                    ServerPool.get_instance().udp_ipv6_servers_pool[
                        port].modify_detect_text_list(self.detect_text_list)
                    ServerPool.get_instance().udp_ipv6_servers_pool[
                        port].modify_detect_hex_list(self.detect_hex_list)

                # Multi-user ports additionally get the refreshed user table.
                if row['is_multi_user'] != 0:
                    if port in ServerPool.get_instance().tcp_servers_pool:
                        ServerPool.get_instance().tcp_servers_pool[
                            port].modify_multi_user_table(md5_users)
                    if port in ServerPool.get_instance().tcp_ipv6_servers_pool:
                        ServerPool.get_instance().tcp_ipv6_servers_pool[
                            port].modify_multi_user_table(md5_users)
                    if port in ServerPool.get_instance().udp_servers_pool:
                        ServerPool.get_instance().udp_servers_pool[
                            port].modify_multi_user_table(md5_users)
                    if port in ServerPool.get_instance().udp_ipv6_servers_pool:
                        ServerPool.get_instance().udp_ipv6_servers_pool[
                            port].modify_multi_user_table(md5_users)

                # Re-run the same relay-rule selection as above and push the
                # result live (empty dict when relaying is off for this row).
                if self.is_relay and row['is_multi_user'] != 2:
                    temp_relay_rules = {}
                    for id in self.relay_rule_list:
                        if ((self.relay_rule_list[id]['user_id'] == user_id or self.relay_rule_list[id]['user_id'] == 0) or row[
                                'is_multi_user'] != 0) and (self.relay_rule_list[id]['port'] == 0 or self.relay_rule_list[id]['port'] == port):
                            has_higher_priority = False
                            for priority_id in self.relay_rule_list:
                                if (
                                    (
                                        self.relay_rule_list[priority_id]['priority'] > self.relay_rule_list[id]['priority'] and self.relay_rule_list[id]['id'] != self.relay_rule_list[priority_id]['id']) or (
                                        self.relay_rule_list[priority_id]['priority'] == self.relay_rule_list[id]['priority'] and self.relay_rule_list[id]['id'] > self.relay_rule_list[priority_id]['id'])) and (
                                        self.relay_rule_list[priority_id]['user_id'] == user_id or self.relay_rule_list[priority_id]['user_id'] == 0) and (
                                        self.relay_rule_list[priority_id]['port'] == port or self.relay_rule_list[priority_id]['port'] == 0):
                                    has_higher_priority = True
                                    continue

                            if has_higher_priority:
                                continue

                            if self.relay_rule_list[id][
                                    'dist_ip'] == '0.0.0.0' and row['is_multi_user'] == 0:
                                continue

                            temp_relay_rules[id] = self.relay_rule_list[id]

                    if port in ServerPool.get_instance().tcp_servers_pool:
                        ServerPool.get_instance().tcp_servers_pool[
                            port].push_relay_rules(temp_relay_rules)
                    if port in ServerPool.get_instance().tcp_ipv6_servers_pool:
                        ServerPool.get_instance().tcp_ipv6_servers_pool[
                            port].push_relay_rules(temp_relay_rules)
                    if port in ServerPool.get_instance().udp_servers_pool:
                        ServerPool.get_instance().udp_servers_pool[
                            port].push_relay_rules(temp_relay_rules)
                    if port in ServerPool.get_instance().udp_ipv6_servers_pool:
                        ServerPool.get_instance().udp_ipv6_servers_pool[
                            port].push_relay_rules(temp_relay_rules)

                else:
                    temp_relay_rules = {}

                    if port in ServerPool.get_instance().tcp_servers_pool:
                        ServerPool.get_instance().tcp_servers_pool[
                            port].push_relay_rules(temp_relay_rules)
                    if port in ServerPool.get_instance().tcp_ipv6_servers_pool:
                        ServerPool.get_instance().tcp_ipv6_servers_pool[
                            port].push_relay_rules(temp_relay_rules)
                    if port in ServerPool.get_instance().udp_servers_pool:
                        ServerPool.get_instance().udp_servers_pool[
                            port].push_relay_rules(temp_relay_rules)
                    if port in ServerPool.get_instance().udp_ipv6_servers_pool:
                        ServerPool.get_instance().udp_ipv6_servers_pool[
                            port].push_relay_rules(temp_relay_rules)

                # Compare the new cfg against the live server's config (tcp
                # v4 first, then v6); any differing merge key forces a
                # stop-and-restart with the new config.
                if port in ServerPool.get_instance().tcp_servers_pool:
                    relay = ServerPool.get_instance().tcp_servers_pool[port]
                    for name in merge_config_keys:
                        if name in cfg and not self.cmp(
                                cfg[name], relay._config[name]):
                            cfgchange = True
                            break
                if not cfgchange and port in ServerPool.get_instance().tcp_ipv6_servers_pool:
                    relay = ServerPool.get_instance().tcp_ipv6_servers_pool[
                        port]
                    for name in merge_config_keys:
                        if name in cfg and not self.cmp(
                                cfg[name], relay._config[name]):
                            cfgchange = True
                            break
                # config changed
                if cfgchange:
                    self.del_server(port, "config changed")
                    new_servers[port] = (passwd, cfg)
            elif ServerPool.get_instance().server_run_status(port) is False:
                # new_servers[port] = passwd
                self.new_server(port, passwd, cfg)

        # Ports seen last cycle but absent from the live set are shut down.
        for row in last_rows:
            if row['port'] in cur_servers:
                pass
            else:
                self.del_server(row['port'], "port not exist")

        if len(new_servers) > 0:
            from shadowsocks import eventloop
            # Give the event loop time to finish tearing down the old
            # listeners before rebinding the same ports.
            self.event.wait(eventloop.TIMEOUT_PRECISION +
                            eventloop.TIMEOUT_PRECISION / 2)
            for port in new_servers.keys():
                passwd, cfg = new_servers[port]
                self.new_server(port, passwd, cfg)

        ServerPool.get_instance().push_uid_port_table(self.uid_port_table)

    def del_server(self, port, reason):
        """Stop the server on `port`, log `reason`, and clear its traffic
        bookkeeping; also reset this user's per-user counters on every
        multi-user port."""
        logging.info(
            'db stop server at port [%s] reason: %s!' %
            (port, reason))
        ServerPool.get_instance().cb_del_server(port)
        if port in self.last_update_transfer:
            del self.last_update_transfer[port]

        for mu_user_port in self.mu_port_list:
            if mu_user_port in ServerPool.get_instance().tcp_servers_pool:
                ServerPool.get_instance().tcp_servers_pool[
                    mu_user_port].reset_single_multi_user_traffic(self.port_uid_table[port])
            if mu_user_port in ServerPool.get_instance().tcp_ipv6_servers_pool:
                ServerPool.get_instance().tcp_ipv6_servers_pool[
                    mu_user_port].reset_single_multi_user_traffic(self.port_uid_table[port])
            if mu_user_port in ServerPool.get_instance().udp_servers_pool:
                ServerPool.get_instance().udp_servers_pool[
                    mu_user_port].reset_single_multi_user_traffic(self.port_uid_table[port])
            if mu_user_port in ServerPool.get_instance().udp_ipv6_servers_pool:
                ServerPool.get_instance().udp_ipv6_servers_pool[
                    mu_user_port].reset_single_multi_user_traffic(self.port_uid_table[port])

    def new_server(self, port, passwd, cfg):
        """Start a server on `port` with `cfg`, falling back to the pool-wide
        config for protocol/method/obfs when `cfg` omits them."""
        protocol = cfg.get(
            'protocol',
            ServerPool.get_instance().config.get(
                'protocol',
                'origin'))
        method = cfg.get(
            'method',
            ServerPool.get_instance().config.get(
                'method',
                'None'))
        obfs = cfg.get(
            'obfs',
            ServerPool.get_instance().config.get(
                'obfs',
                'plain'))
        logging.info(
            'db start server at port [%s] pass [%s] protocol [%s] method [%s] obfs [%s]' %
            (port, passwd, protocol, method, obfs))
        ServerPool.get_instance().new_server(port, cfg)
# NOTE(review): reconstructed from a collapsed git patch; tokens preserved,
# formatting and comments restored/added.

    @staticmethod
    def del_servers():
        """Stop every running server in the tcp v4/v6 pools and drop their
        transfer bookkeeping from the module-level db_instance."""
        global db_instance
        # Iterate over a snapshot of the keys: cb_del_server mutates the pool.
        for port in [
                v for v in ServerPool.get_instance().tcp_servers_pool.keys()]:
            if ServerPool.get_instance().server_is_run(port) > 0:
                ServerPool.get_instance().cb_del_server(port)
                if port in db_instance.last_update_transfer:
                    del db_instance.last_update_transfer[port]
        for port in [
                v for v in ServerPool.get_instance().tcp_ipv6_servers_pool.keys()]:
            if ServerPool.get_instance().server_is_run(port) > 0:
                ServerPool.get_instance().cb_del_server(port)
                if port in db_instance.last_update_transfer:
                    del db_instance.last_update_transfer[port]

    @staticmethod
    def thread_db(obj):
        """Main polling loop: every ~60s ping the web API, push traffic
        stats, pull the user list, and reconcile running servers.

        :param obj: the DbTransfer-like class to instantiate as the
                    module-global db_instance singleton.
        """
        import socket
        import time
        import webapi_utils
        global db_instance
        global webapi
        timeout = 60
        socket.setdefaulttimeout(timeout)
        last_rows = []
        db_instance = obj()
        webapi = webapi_utils.WebApi()

        shell.log_shadowsocks_version()
        try:
            import resource
            logging.info(
                'current process RLIMIT_NOFILE resource: soft %d hard %d' %
                resource.getrlimit(
                    resource.RLIMIT_NOFILE))
        except:
            # `resource` is unavailable on some platforms (e.g. Windows);
            # the limit report is purely informational.
            pass
        try:
            while True:
                load_config()
                try:
                    # A failed ping means the panel API is unreachable or
                    # misconfigured; skip this cycle rather than push/pull.
                    ping = webapi.getApi('func/ping')
                    if ping is None:
                        logging.error(
                            'something wrong with your http api, please check your config and website status and try again later.')
                    else:
                        db_instance.push_db_all_user()
                        rows = db_instance.pull_db_all_user()
                        db_instance.del_server_out_of_bound_safe(
                            last_rows, rows)
                        last_rows = rows
                except Exception as e:
                    # Log and keep polling; a single bad cycle must not kill
                    # the thread.
                    trace = traceback.format_exc()
                    logging.error(trace)
                    # logging.warn('db thread except:%s' % e)
                # event.wait(60) doubles as the poll interval and the stop
                # signal: it returns True when thread_db_stop() set the event.
                if db_instance.event.wait(60) or not db_instance.is_all_thread_alive():
                    break
                if db_instance.has_stopped:
                    break
        except KeyboardInterrupt as e:
            pass
        # Shut everything down and release the singleton on exit.
        db_instance.del_servers()
        ServerPool.get_instance().stop()
        db_instance = None

    @staticmethod
    def thread_db_stop():
        """Signal thread_db to exit: set the stop flag and wake its wait."""
        global db_instance
        db_instance.has_stopped = True
        db_instance.event.set()

    def is_all_thread_alive(self):
        """Return False when the ServerPool worker thread has died."""
        if not ServerPool.get_instance().thread.is_alive():
            return False
        return True

# ---------------------------------------------------------------------------
# (patch hunk boundary) The remainder of this chunk is a NEW FILE added by the
# same patch: webapi_utils.py — a thin HTTP client for the panel's mod_mu API.
# ---------------------------------------------------------------------------

#!/usr/bin/python
# -*- coding: utf-8 -*-

import logging
import requests
from configloader import load_config, get_config
from collections import OrderedDict


class WebApi(object):
    """HTTP client for the panel's /mod_mu/ API.

    Uses one requests.Session so connections are pooled across the polling
    loop's repeated calls.  Both methods append the configured API key as the
    `key` query parameter and expect a JSON envelope {'ret': int, 'data': ...};
    ret == 0 is treated as failure.
    """

    def __init__(self):
        # Shared session: connection pooling / keep-alive across API calls.
        self.session_pool = requests.Session()

    def getApi(self, uri, params={}):
        """GET /mod_mu/<uri> and return the 'data' field of the JSON reply.

        :param uri:    path below /mod_mu/, e.g. 'func/ping'.
        :param params: extra query parameters; copied, never mutated.
                       NOTE(review): mutable default argument — harmless here
                       because it is only read via .copy(), but worth fixing.
        :raises Exception: on network failure, non-JSON body, or ret == 0.
        """
        res = None
        try:
            uri_params = params.copy()
            uri_params['key'] = get_config().WEBAPI_TOKEN
            res = self.session_pool.get(
                '%s/mod_mu/%s' %
                (get_config().WEBAPI_URL, uri),
                params=uri_params,
                timeout=10)
            try:
                data = res.json()
            except Exception:
                if res:
                    logging.error("Error data:%s" % (res.text))
                raise Exception('error data!')
            if data['ret'] == 0:
                logging.error("Error data:%s" % (res.text))
                logging.error("request %s error!wrong ret!" % (uri))
                raise Exception('wrong ret!')
            return data['data']
        except Exception:
            # NOTE(review): re-raising a generic Exception discards the
            # original error type; callers only see the message string.
            import traceback
            trace = traceback.format_exc()
            logging.error(trace)
            raise Exception('network issue or server error!')

    def postApi(self, uri, params={}, raw_data={}):
        """POST /mod_mu/<uri> with `raw_data` as the JSON body and return the
        'data' field of the JSON reply.

        :param uri:      path below /mod_mu/.
        :param params:   extra query parameters; copied, never mutated.
        :param raw_data: JSON-serializable request body.
                         NOTE(review): mutable default arguments, as above.
        :raises Exception: on network failure, non-JSON body, or ret == 0.
        """
        res = None
        try:
            uri_params = params.copy()
            uri_params['key'] = get_config().WEBAPI_TOKEN
            res = self.session_pool.post(
                '%s/mod_mu/%s' %
                (get_config().WEBAPI_URL,
                 uri),
                params=uri_params,
                json=raw_data,
                timeout=10)
            try:
                data = res.json()
            except Exception:
                if res:
                    logging.error("Error data:%s" % (res.text))
                raise Exception('error data!')
            if data['ret'] == 0:
                logging.error("Error data:%s" % (res.text))
                logging.error("request %s error!wrong ret!" % (uri))
                raise Exception('wrong ret!')
            return data['data']
        except Exception:
            import traceback
            trace = traceback.format_exc()
            logging.error(trace)
            raise Exception('network issue or server error!')