diff --git a/.travis.yml b/.travis.yml index 3b7390129..325d3a7fb 100644 --- a/.travis.yml +++ b/.travis.yml @@ -43,6 +43,11 @@ matrix: os: linux dist: xenial stage: lint + - python: 3.7.2 + env: TOXENV=spell + os: linux + dist: xenial + stage: lint - python: 3.6.0 env: TOXENV=3.6 IDENT="3.6.0" RUN_SUITE=y NO_CYTHON=y os: linux diff --git a/AUTHORS.rst b/AUTHORS.rst index 56b84d6ea..845933575 100644 --- a/AUTHORS.rst +++ b/AUTHORS.rst @@ -15,8 +15,8 @@ Creators Ask the community for help in the Slack channel, or ask a question on Stack Overflow. -Comitters -========= +Committers +========== +---------------------+---------------------------------------+ | Arpan Shah | | diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index bd9bf0681..630a91ff8 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -28,7 +28,7 @@ the person merging the changes so don't worry too much. Code of Conduct =============== -Everyone interacting in the project's codebases, issue trackers, chat rooms, +Everyone interacting in the project's code bases, issue trackers, chat rooms, and mailing lists is expected to follow the Faust Code of Conduct. As contributors and maintainers of these projects, and in the interest of fostering diff --git a/Changelog.rst b/Changelog.rst index c789ff418..048f279f6 100644 --- a/Changelog.rst +++ b/Changelog.rst @@ -46,7 +46,7 @@ please visit the :ref:`history` section. Implemented in the default monitor, but also for statsd and datadog. -- **CLI**: The :program:`faust` program had the wrong help descrioption. +- **CLI**: The :program:`faust` program had the wrong help description. - **Docs**: Fixes typo in :setting:`web_cors_options` example. @@ -237,7 +237,7 @@ please visit the :ref:`history` section. - **App**: Web server is no longer running in a separate thread by default. Running the web server in a separate thread is beneficial as it - will not be affected by backpressue in the main thread event loop, + will not be affected by back pressure in the main thread event loop, but it also makes programming harder when it cannot share the loop of the parent. @@ -332,7 +332,7 @@ please visit the :ref:`history` section. See new :setting:`web_cors_options` setting. -- **Debugging**: Added `OpenTracing`_ hooks to streams/tasks/timers/crontabs +- **Debugging**: Added `OpenTracing`_ hooks to streams/tasks/timers/Crontabs and rebalancing process. To enable you have to define a custom ``Tracer`` class that will diff --git a/Makefile b/Makefile index 6a9f068d5..895deb24a 100644 --- a/Makefile +++ b/Makefile @@ -60,7 +60,7 @@ help: @echo "contrib - Regenerate CONTRIBUTING.rst file" @echo "coc - Regenerate CODE_OF_CONDUCT.rst file" @echo "clean-dist --------- - Clean all distribution build artifacts." - @echo " clean-git-force - Remove all uncomitted files." + @echo " clean-git-force - Remove all uncommitted files." @echo " clean ------------ - Non-destructive clean" @echo " clean-pyc - Remove .pyc/__pycache__ files" @echo " clean-docs - Remove documentation build artifacts." @@ -113,6 +113,9 @@ apicheck: configcheck: (cd "$(SPHINX_DIR)"; $(MAKE) configcheck) +spell: + (cd "$(SPHINX_DIR)"; $(MAKE) spell) + flakecheck: $(FLAKE8) "$(PROJ)" "$(TESTDIR)" examples/ diff --git a/README.rst b/README.rst index 1a8f28bc0..1de470bd5 100644 --- a/README.rst +++ b/README.rst @@ -580,7 +580,7 @@ documentation. 
Code of Conduct =============== -Everyone interacting in the project's codebases, issue trackers, chat rooms, +Everyone interacting in the project's code bases, issue trackers, chat rooms, and mailing lists is expected to follow the Faust Code of Conduct. As contributors and maintainers of these projects, and in the interest of fostering diff --git a/TODO.rst b/TODO.rst index 537552208..28864750b 100644 --- a/TODO.rst +++ b/TODO.rst @@ -129,7 +129,7 @@ Documentation - Message lifecycle - - Manual acknowledgement (``async with event``) + - Manual acknowledgment (``async with event``) - Arguments to ``app.agent`` @@ -168,7 +168,7 @@ Documentation - recovery - - acknowledgements + - acknowledgments - Go through comments in the code, some of it describes things that should be documented. diff --git a/docs/Makefile b/docs/Makefile index bc3c18034..49a17e4ac 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -52,7 +52,7 @@ help: @echo " coverage to run coverage check of the documentation (if enabled)" @echo " apicheck to verify that all modules are present in autodoc" @echo " configcheck to verify that all modules are present in autodoc" - @echo " spelling to run a spell checker on the documentation" + @echo " spell to run a spell checker on the documentation" .PHONY: clean clean: @@ -233,9 +233,9 @@ apicheck: configcheck: $(SPHINXBUILD) -b configcheck $(ALLSPHINXOPTS) $(BUILDDIR)/configcheck -.PHONY: spelling -spelling: - SPELLCHECK=1 $(SPHINXBUILD) -b spelling $(ALLSPHINXOPTS) $(BUILDDIR)/spelling +.PHONY: spell +spell: + SPELLCHECK=1 $(SPHINXBUILD) -b spelling $(ALLSPHINXOPTS) $(BUILDDIR)/spell .PHONY: xml xml: diff --git a/docs/developerguide/overview.rst b/docs/developerguide/overview.rst index 11c0de5bf..4271fe7df 100644 --- a/docs/developerguide/overview.rst +++ b/docs/developerguide/overview.rst @@ -48,7 +48,7 @@ Module Overview top-level package. ``faust.web`` - Web abstractions and web apps served by the Faust web server. + Web abstractions and web applications served by the Faust web server. ``faust.windows`` Windowing strategies. @@ -63,8 +63,8 @@ Services Everything in Faust that can be started/stopped and restarted, is a :class:`~faust.utils.services.Service`. -Services can start other services, but they can also start asyncio.Tasks via -`self.add_future`. These dependencies will be started/stopped/restarted with +Services can start other services, but they can also start :class:`asyncio.Task` +via `self.add_future`. These dependencies will be started/stopped/restarted with the service. ``Worker`` @@ -76,7 +76,7 @@ setting up logging, installs signal handlers and debugging tools etc. ``App`` ------- -The app configures the Faust instance, and is the entrypoint for just about +The app configures the Faust instance, and is the entry point for just about everything that happens in a Faust instance. Consuming/Producing messages, starting streams and agents, etc. @@ -102,7 +102,7 @@ in anticipation of messages to be produced. The Consumer is responsible for consuming messages from Kafka topics, to be delivered to the streams. It does not actually fetch messages (the -``Fetcher`` services does tha), but it handles everything to do with +``Fetcher`` service does that), but it handles everything to do with consumption, like managing topic subscriptions etc. 
``Agent`` diff --git a/docs/glossary.rst b/docs/glossary.rst index 4749f551b..f465d3e4e 100644 --- a/docs/glossary.rst +++ b/docs/glossary.rst @@ -9,7 +9,7 @@ Glossary acked acking acknowledged - Acknowledgement marks a message as fully processed. + Acknowledgment marks a message as fully processed. It’s a signal that the program does not want to see the message again. Faust advances the offset by committing after a message is acknowledged. @@ -95,7 +95,7 @@ Glossary Further reading: https://en.wikipedia.org/wiki/Idempotent nullipotent - nillipotence + nullipotence nullipotency describes a function that'll have the same effect, and give the same result, even if called zero or multiple times (side-effect free). diff --git a/docs/history/changelog-1.0.rst b/docs/history/changelog-1.0.rst index 35051f692..f55f41b25 100644 --- a/docs/history/changelog-1.0.rst +++ b/docs/history/changelog-1.0.rst @@ -180,7 +180,7 @@ For even older releases you can visit the :ref:`history` section. - **Web**: Adds ``app.on_webserver_init(web)`` callback for ability to serve static files using ``web.add_static``. -- **Web**: Adds web.add_static(prefix, fs_path) +- **Web**: Adds ``web.add_static(prefix, fs_path)`` - **Worker**: New ``App.unassigned`` attribute is now set if the worker does not have any assigned partitions. @@ -267,7 +267,7 @@ For even older releases you can visit the :ref:`history` section. with the official versions of :pypi:`aiokafka` and :pypi:`kafka-python`. If you have those in requirements, please remove them from your - virtual env and remove them from requirements. + ``virtualenv`` and remove them from requirements. - **Worker**: Fixes hanging in wait_empty. @@ -303,7 +303,7 @@ For even older releases you can visit the :ref:`history` section. + Now depends on :pypi:`robinhood-aiokafka` 0.4.12 -- **Worker**: Fixed problem where worker does not recover after macbook +- **Worker**: Fixed problem where worker does not recover after MacBook sleeping and waking up. - **Worker**: Fixed crash that could lead to rebalancing loop. @@ -377,11 +377,11 @@ For even older releases you can visit the :ref:`history` section. - **Statsd**: The statsd monitor prematurely initialized the event loop on module import. - We had a fix for this, but somehow forgot to remove the "hardcoded + We had a fix for this, but somehow forgot to remove the "hard coded super" that was set to call: ``Service.__init__(self, **kwargs)``. The class is not even a subclass of Service anymore, and we are lucky it - manifests merely when doing something drastic, like py.test, + manifests merely when doing something drastic, like ``py.test``, recursively importing all modules in a directory. .. _version-1.0.12: @@ -523,10 +523,10 @@ For even older releases you can visit the :ref:`history` section. + Now depends on Aiokafka 0.4.7. -- **Table**: Delete keys whe raw value in changelog set to None +- **Table**: Delete keys when raw value in changelog set to :const:`None` - This was resulting in deleted keys still being present with value None - upon recovery. + This was resulting in deleted keys still being present with value + :const:`None` upon recovery. - **Transports**: Crash app on CommitFailedError thrown by :pypi:`aiokafka`. @@ -562,7 +562,7 @@ For even older releases you can visit the :ref:`history` section. with 100% CPU usage. After processing all records in all topic partitions, the worker - would spinloop. + would spin loop. 
- **API**: Added new base class for windows: :class:`faust.Window` @@ -743,7 +743,7 @@ For even older releases you can visit the :ref:`history` section. # manual acknowledgment await stream.ack(stream.current_event) - .. admonition:: Manual Acknowledgement + .. admonition:: Manual Acknowledgment The stream is a sequence of events, where each event has a sequence number: the "offset". @@ -752,7 +752,7 @@ For even older releases you can visit the :ref:`history` section. the Kafka broker will keep track of the last committed offset for any topic. - This means "acknowledgement" works quite differently from other + This means "acknowledgment" works quite differently from other message brokers, such as RabbitMQ where you can selectively ack some messages, but not others. diff --git a/docs/history/changelog-1.1.rst b/docs/history/changelog-1.1.rst index 1de4ce7bf..46e6f696e 100644 --- a/docs/history/changelog-1.1.rst +++ b/docs/history/changelog-1.1.rst @@ -57,7 +57,7 @@ For even older releases you can visit the :ref:`history` section. if the buffer grows too large and we have too much to do during commit. - The commit operation works like this (in pseudocode) when + The commit operation works like this (in pseudo code) when :setting:`stream_publish_on_commit` is enabled: .. sourcecode:: python @@ -262,7 +262,7 @@ News volume: Decimal When serialized this model will use string for decimal fields - (the javascript float type cannot be used without losing precision, it + (the Javascript float type cannot be used without losing precision, it is a float after all), but when deserializing Faust will reconstruct them as Decimal objects from that string. @@ -390,7 +390,7 @@ News Contributed by Mika Eloranta (:github_user:`melor`). - **Distribution**: ``pip install faust`` no longer installs the examples - direcrtory. + directory. Fix contributed by Michael Seifert (:github_user:`seifertm`) diff --git a/docs/history/changelog-1.2.rst b/docs/history/changelog-1.2.rst index 9cbf5dc65..e157c7c88 100644 --- a/docs/history/changelog-1.2.rst +++ b/docs/history/changelog-1.2.rst @@ -65,7 +65,7 @@ For even older releases you can visit the :ref:`history` section. This will be the :class:`aiohttp.web_app.Application` instance used. - **Documentation**: Fixed markup typo in the settings section of the - userguide (Issue #177). + :ref:`guide` (Issue #177). Contributed by Denis Kataev (:github_user:`kataev`). diff --git a/docs/history/changelog-1.4.rst b/docs/history/changelog-1.4.rst index f60ae12e9..d02c12a00 100644 --- a/docs/history/changelog-1.4.rst +++ b/docs/history/changelog-1.4.rst @@ -24,7 +24,7 @@ For even older releases you can visit the :ref:`history` section. - :setting:`max_poll_records` accidentally set to 500 by default. The setting has been reverted to its documented default of :const:`None`. - This resulted in a 20x performance improvement :tada: + This resulted in a 20x performance improvement. - **CLI**: Now correctly returns non-zero exitcode when exception raised inside ``@app.command``. @@ -195,7 +195,7 @@ For even older releases you can visit the :ref:`history` section. + Allison Wang (:github_user:`allisonwang`). + Thibault Serot (:github_user:`thibserot`). - + oucb (:github_user:`oucb`). + + :github_user:`oucb`. - **CI**: Added CPython 3.7.2 and 3.6.8 to Travis CI build matrix. @@ -252,8 +252,8 @@ For even older releases you can visit the :ref:`history` section. - **Worker**: The Kafka consumer is now running in a separate thread. 
- The Kafka heartbeat background corutine sends heartbeats every 3.0 seconds, - and if those are missed rebalancing occurs. + The Kafka heartbeat background coroutine sends heartbeats + every 3.0 seconds, and if those are missed rebalancing occurs. This patch moves the :pypi:`aiokafka` library inside a separate thread, this way it can send responsive heartbeats and operate even when agents @@ -273,9 +273,9 @@ For even older releases you can visit the :ref:`history` section. Instead we periodically flush changes to RocksDB, and populate the sets from disk at worker startup/table recovery. -- **App**: Adds support for crontab tasks. +- **App**: Adds support for Crontab tasks. - You can now define periodic tasks using cron-syntax: + You can now define periodic tasks using Cron-syntax: .. sourcecode:: python @@ -293,12 +293,12 @@ For even older releases you can visit the :ref:`history` section. - **App**: Providing multiple URLs to the :setting:`broker` setting now works as expected. - To facilitiate this change ``app.conf.broker`` is now + To facilitate this change ``app.conf.broker`` is now ``List[URL]`` instead of a single :class:`~yarl.URL`. - **App**: New :setting:`timezone` setting. - This setting is currently used as the default timezone for crontab tasks. + This setting is currently used as the default timezone for Crontab tasks. - **App**: New :setting:`broker_request_timeout` setting. diff --git a/docs/includes/code-of-conduct.txt b/docs/includes/code-of-conduct.txt index 297155cc6..5ad1f23b9 100644 --- a/docs/includes/code-of-conduct.txt +++ b/docs/includes/code-of-conduct.txt @@ -1,7 +1,7 @@ Code of Conduct =============== -Everyone interacting in the project's codebases, issue trackers, chat rooms, +Everyone interacting in the project's code bases, issue trackers, chat rooms, and mailing lists is expected to follow the Faust Code of Conduct. As contributors and maintainers of these projects, and in the interest of fostering diff --git a/docs/includes/installation.txt b/docs/includes/installation.txt index 09310aeca..866ac0211 100644 --- a/docs/includes/installation.txt +++ b/docs/includes/installation.txt @@ -44,7 +44,7 @@ Caching ~~~~~~~ :``faust[redis]``: - for using `Redis_` as a simple caching backend (memcache-style). + for using `Redis`_ as a simple caching backend (Memcached-style). Optimization ~~~~~~~~~~~~ diff --git a/docs/includes/intro.txt b/docs/includes/intro.txt index e347c9125..b70838fa1 100644 --- a/docs/includes/intro.txt +++ b/docs/includes/intro.txt @@ -44,7 +44,7 @@ This system can persist state, acting like a database. Tables are named distributed key/value stores you can use as regular Python dictionaries. -Tables are stored locally on each machine using a superfast +Tables are stored locally on each machine using a super fast embedded database written in C++, called `RocksDB`_. Tables can also store aggregate counts that are optionally "windowed" diff --git a/docs/includes/introduction.txt b/docs/includes/introduction.txt index 0608ab79c..87b9d8118 100644 --- a/docs/includes/introduction.txt +++ b/docs/includes/introduction.txt @@ -58,4 +58,4 @@ Faust is just Python, and a stream is an infinite asynchronous iterator. If you know how to use Python, you already know how to use Faust, and it works with your favorite Python libraries like Django, Flask, - SQLAlchemy, NTLK, NumPy, Scikit, TensorFlow, etc. + SQLAlchemy, NLTK, NumPy, SciPy, TensorFlow, etc. 
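The Crontab support described in the changelog hunk above, together with the ``cron_format``, ``timezone``, and ``on_leader`` arguments documented later in this patch, can be sketched as follows; the app name, broker URL, and task body are illustrative placeholders, not part of the patch:

.. sourcecode:: python

    import faust

    app = faust.App('cron-example', broker='kafka://localhost:9092')

    # Runs every day at 18:30 in the app's configured timezone;
    # on_leader=True restricts execution to the leader worker.
    @app.crontab('30 18 * * *', on_leader=True)
    async def nightly_report() -> None:
        print('PRODUCING NIGHTLY REPORT')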
diff --git a/docs/includes/kafka.txt b/docs/includes/kafka.txt index cc38ace61..c4418da29 100644 --- a/docs/includes/kafka.txt +++ b/docs/includes/kafka.txt @@ -85,8 +85,8 @@ What you must know about Apache Kafka to use Faust **Log Compaction** - Log compaction is methodology Kafka uses to make sure that as data for a key - changes it doesn't affect the size of the log such that every state change + Log compaction is a methodology Kafka uses to make sure that as data for a key + changes it will not affect the size of the log such that every state change is maintained for all time. Only the most recent value is guaranteed to be available. Periodic compaction removes all values for a key except the last one. diff --git a/docs/introduction.rst b/docs/introduction.rst index b956f9f88..009a0fefe 100644 --- a/docs/introduction.rst +++ b/docs/introduction.rst @@ -81,8 +81,8 @@ What can it do? services. Thanks to Faust and :mod:`asyncio` you can now embed your stream processing - topology into your existing asyncio/gevent/eventlet/Twisted/Tornado - applications. + topology into your existing :mod:`asyncio`/:pypi:`gevent`/ + :pypi:`eventlet`/:pypi:`Twisted`/:pypi:`Tornado` applications. **Faust is...** .. include:: includes/introduction.txt @@ -150,7 +150,7 @@ What do I need? **Extensions** - - RocksDB 5.0 or later, python-rocksdb + - RocksDB 5.0 or later, :pypi:`python-rocksdb` Faust requires Python 3.6 or later, and a running Kafka broker. @@ -164,19 +164,19 @@ Extensions +--------------+-------------+--------------------------------------------------+ | **Name** | **Version** | **Bundle** | +--------------+-------------+--------------------------------------------------+ -| rocksdb | 5.0 | ``pip install faust[rocksdb]`` | +| ``rocksdb`` | 5.0 | ``pip install faust[rocksdb]`` | +--------------+-------------+--------------------------------------------------+ -| redis | aredis 1.1 | ``pip install faust[redis]`` | +| ``redis`` | aredis 1.1 | ``pip install faust[redis]`` | +--------------+-------------+--------------------------------------------------+ -| datadog | 0.20.0 | ``pip install faust[datadog]`` | +| ``datadog`` | 0.20.0 | ``pip install faust[datadog]`` | +--------------+-------------+--------------------------------------------------+ -| statsd | 3.2.1 | ``pip install faust[statsd]`` | +| ``statsd`` | 3.2.1 | ``pip install faust[statsd]`` | +--------------+-------------+--------------------------------------------------+ -| uvloop | 0.8.1 | ``pip install faust[uvloop]`` | +| ``uvloop`` | 0.8.1 | ``pip install faust[uvloop]`` | +--------------+-------------+--------------------------------------------------+ -| gevent | 1.4.0 | ``pip install faust[gevent]`` | +| ``gevent`` | 1.4.0 | ``pip install faust[gevent]`` | +--------------+-------------+--------------------------------------------------+ -| eventlet | 1.16.0 | ``pip install faust[eventlet]`` | +| ``eventlet`` | 1.16.0 | ``pip install faust[eventlet]`` | +--------------+-------------+--------------------------------------------------+ Optimizations @@ -184,32 +184,32 @@ Optimizations These can be all installed using ``pip install faust[fast]``: -+--------------+-------------+--------------------------------------------------+ -| **Name** | **Version** | **Bundle** | -+--------------+-------------+--------------------------------------------------+ -| aiodns | 1.1.0 | ``pip install faust[aiodns]`` | -+--------------+-------------+--------------------------------------------------+ -| cchardet | 1.1.0 | ``pip install faust[cchardet]`` 
| -+--------------+-------------+--------------------------------------------------+ -| ciso8601 | 2.1.0 | ``pip install faust[ciso8601]`` | -+--------------+-------------+--------------------------------------------------+ -| cython | 0.9.26 | ``pip install faust[cython]`` | -+--------------+-------------+--------------------------------------------------+ -| setproctitle | 1.1.0 | ``pip install faust[setproctitle]`` | -+--------------+-------------+--------------------------------------------------+ ++------------------+-------------+--------------------------------------------------+ +| **Name** | **Version** | **Bundle** | ++------------------+-------------+--------------------------------------------------+ +| ``aiodns`` | 1.1.0 | ``pip install faust[aiodns]`` | ++------------------+-------------+--------------------------------------------------+ +| ``cchardet`` | 1.1.0 | ``pip install faust[cchardet]`` | ++------------------+-------------+--------------------------------------------------+ +| ``ciso8601`` | 2.1.0 | ``pip install faust[ciso8601]`` | ++------------------+-------------+--------------------------------------------------+ +| ``cython`` | 0.9.26 | ``pip install faust[cython]`` | ++------------------+-------------+--------------------------------------------------+ +| ``setproctitle`` | 1.1.0 | ``pip install faust[setproctitle]`` | ++------------------+-------------+--------------------------------------------------+ Debugging extras ---------------- These can be all installed using ``pip install faust[debug]``: -+--------------+-------------+--------------------------------------------------+ -| **Name** | **Version** | **Bundle** | -+--------------+-------------+--------------------------------------------------+ -| aiomonitor | 0.3 | ``pip install faust[aiomonitor]`` | -+--------------+-------------+--------------------------------------------------+ -| setproctitle | 1.1.0 | ``pip install faust[setproctitle]`` | -+--------------+-------------+--------------------------------------------------+ ++------------------+-------------+--------------------------------------------------+ +| **Name** | **Version** | **Bundle** | ++------------------+-------------+--------------------------------------------------+ +| ``aiomonitor`` | 0.3 | ``pip install faust[aiomonitor]`` | ++------------------+-------------+--------------------------------------------------+ +| ``setproctitle`` | 1.1.0 | ``pip install faust[setproctitle]`` | ++------------------+-------------+--------------------------------------------------+ .. note:: diff --git a/docs/playbooks/cheatsheet.rst b/docs/playbooks/cheatsheet.rst index ddbbb4248..bc37402d8 100644 --- a/docs/playbooks/cheatsheet.rst +++ b/docs/playbooks/cheatsheet.rst @@ -1,5 +1,5 @@ =================================== - Cheatsheet + Cheat Sheet =================================== .. topic:: Process events in a Kafka topic diff --git a/docs/playbooks/quickstart.rst b/docs/playbooks/quickstart.rst index 559377af4..461728b82 100644 --- a/docs/playbooks/quickstart.rst +++ b/docs/playbooks/quickstart.rst @@ -1,7 +1,7 @@ .. _quickstart: ============================================================ - Quickstart + Quick Start ============================================================ .. 
contents:: diff --git a/docs/spelling_wordlist.txt b/docs/spelling_wordlist.txt new file mode 100644 index 000000000..072f9c1e1 --- /dev/null +++ b/docs/spelling_wordlist.txt @@ -0,0 +1,708 @@ +許邱翔 +AMQP +Adriaenssens +Adrien +Agris +Ahmet +Aitor +Akira +Alain +Alcides +Aleksandr +Alexey +Allard +Alman +Almeer +Ameriks +Amit +Andreas +Andrey +Andriy +Aneil +Areski +Armin +Arpan +Artyom +Atanasov +Attias +Attwood +Autechre +Avro +Axel +Aziz +Azovskov +Babiy +Bargen +Baumgold +Belaid +Bence +Berker +Bevan +Biel +Biggs +Bistuer +Bolshakov +Bouterse +Bozorgkhan +Brakhane +Brendon +Breshears +Bridgen +Briem +Brodie +Bryson +Buckens +Bujniewicz +Buttu +CPython +Carvalho +Cassandra +Catalano +Catalin +Chamberlin +Chiastic +Chintomby +Christoph +Cipater +Clowes +Cobertura +Codeb +Copartitioned +CouchDB +Couchbase +Cramer +Cristian +Cron +Crontab +Crontabs +Czajka +Danilo +Daodao +Dartiguelongue +Davanum +Davide +Davidsson +Deane +Dees +Dein +Delalande +Demir +Deo +Django +Dmitry +Dubus +Dudás +Duggan +Duryee +Elasticsearch +Eloranta +Engledew +Eran +Erway +Esquivel +Farrimond +Farwell +Fatih +Feanil +Fladischer +Flavio +Floering +Fokau +Frantisek +Gao +Garnero +Gauvrit +Gedminas +Georgievsky +Germán +Gheem +Gilles +GitHub +Goel +Goiri +Gorbunov +Grainger +Greinhofer +Groner +Grossi +Grégoire +Guillaume +Guinet +Gunnlaugur +Gylfason +Gómez +Haag +Haddleton +Harnly +Harrigan +Haskins +Helmers +Helmig +Henrik +Heroku +Hoch +Hoeve +Hogni +Holop +Homebrew +Honza +Hsad +Hu +Huyuumi +Hynek +IP +Iacob +Idan +Ignas +Illes +Ilya +Ionel +IronCache +Iurii +Jaillet +Jameel +Jamshed +Janež +Jaren +Javascript +Jelle +Jellick +Jerzy +Jevnik +Jiangmiao +Jirka +Johansson +Julien +Jython +KStream +Kai +Kalinowski +Kamara +Karlsson +Kataev +Katz +Khera +KiB +Kilgo +Kirill +Kiriukha +Kirkham +Kjartansson +Klindukh +Kombu +Konstantin +Konstantinos +Kornelijus +Korner +Koshelev +Kotlyarov +Kouhei +Koukopoulos +Koval +Kozera +Kracekumar +Kral +Kriachko +Krybus +Krzysztof +Krzyzanowski +Kubernetes +Kumar +Kupershmidt +Kuznetsov +Lamport +Langford +Latitia +Lavin +Lawley +Lebedev +Ledesma +Legrand +Liang +Lim +Loic +Luckie +Maeda +Maillard +Malinovsky +Mallavarapu +Manipon +Marcio +Maries +Markey +Markus +Marlow +Masiero +Matsuzaki +Maxime +Maślanka +McGregor +Melin +Memcached +Metzlar +Mher +Mickaël +Miha +Mika +Mikalajūnas +Milen +Mitar +ModelT +Modrzejewski +MongoDB +Movsisyan +Munin +Môshe +Mărieș +NaN +Nagurney +Narasimhan +Nextdoor +Nik +Nikolov +Nimi +Node.js +Northway +Nullary +Nyby +ORM +O'Reilly +Oberegger +Oblovatniy +Omer +Ordoquy +Ori +Parncutt +Patrin +Paulo +Pavel +Pavlovic +Pearce +Peksag +Penhard +Pepijn +Permana +Petersson +Petrello +Pika +Piotr +Playbooks +Podshumok +Poissonnier +Pomfrey +Pravec +Prithvi +Pulec +Pyro +Pär +QoS +Qpid +Quarta +RPC +RSS +Rabbaglietti +RabbitMQ +Rackspace +Radek +Raghuram +Ramaraju +Rao +Raphaël +Rattray +Rayward +Redis +Remigiusz +Remy +Renberg +Riak +Ribeiro +Rinat +Ripshtos +Robenolt +Robinhood +Rodionoff +Romuald +Ronacher +Rongze +Rossi +Rouberol +Rudakou +Rundstein +Rémy +SQLAlchemy +SQS +Sadaoui +Sanic +Sanyam +Satia +Savchenko +Savvides +Schlawack +Schottdorf +Schwarz +Seifert +Selivanov +SemVer +Seong +Sergey +Serot +Seto +Seungha +Shahi +Shigapov +Shrey +Slinckx +Smirnov +Solem +Solt +Sosnovskiy +Srinivas +Srinivasan +Stas +StateDB +Steeve +Sterre +Streeter +Sucu +Sukrit +Surloppe +Survila +SysV +Tadej +Tallon +Tamas +Tantiras +Taub +Tewfik +Theo +Thibault +Thrift +Tikhonov +Tobias +Tochev +Tocho +TopicPartition +Troha +Tsigularov +Twomey +URI +Ullmann +Unix 
+Valentyn +Vanderbauwhede +Varona +Vdb +Veatch +Vejrazka +Verhagen +Verstraaten +Vesuna +Viamontes +Viktor +Vineet +Vitaly +Vixie +Voronov +Vos +Vsevolod +Wariboko +Webber +Werkzeug +Whitlock +Whitten +Widman +Wieslander +Wil +Wiman +Wun +Wutte +Yaroslav +Younkins +Yu +Yurchuk +Yury +Yuval +Zarowny +Zatelepin +Zaytsev +Zhanzhao +Zhaorong +Zhavoronkov +Zhu +Zhy +Zookeeper +Zoran +Zoë +abortable +ack +acked +acking +acks +acyclic +amongst +app +apps +arg +args +arity +async +asyncio +autocommit +autodetect +autodiscover +autodiscovery +autodoc +autoscale +autoscaler +autoscalers +autoscaling +backend +backends +backport +backported +backtrace +bootstep +bootsteps +bufsize +bugfix +callables +callbacks +celerymon +changelog +changelogged +chunking +cipater +codebase +codec +committer +committers +compat +conf +config +const +contrib +copartitioned +coroutine +coroutines +cronjob +cronjobs +cryptographic +daemonization +daemonize +daemonizing +dataclasses +datetimes +dburi +de +deprecated +deprecations +der +deserialization +deserialize +deserialized +deserializes +deserializing +dest +destructor +dev +distro +docstring +docstrings +embeddable +encodable +env +errbacks +euid +eventlet +exc +execv +exitcode +failover +falsy +fanout +faust +fh +filename +fixup +fixups +func +gevent +gid +greenlet +greenlets +greenthreads +hashable +highwater +highwaters +hostname +http +idempotence +idempotency +ident +indices +init +initializer +inline +instantiation +interoperability +intersphinx +iterable +js +json +kombu +kwarg +kwargs +localhost +logfile +login +loglevel +lookup +macOS +memoization +memoize +memoized +metadata +metavar +misconfiguration +misconfigure +misconfigured +msgpack +multi +mutex +mutexes +mymodule +namespace +nargs +natively +nodename +nullipotence +nullipotency +nullipotent +optimizations +persister +pickleable +pid +pidbox +pidfile +pidfiles +playbook +pluggable +plugin +poller +pre +prefetch +prefetched +prefetching +prefork +preload +preloading +prepend +prepended +programmatically +proj +protobuf +questionark +rdb +reStructured +rebalance +rebalances +rebalancing +rebased +rebasing +redelegation +redelivered +redelivery +reentrancy +reentrant +refactor +refactored +refactoring +referenceable +regex +regexes +reloader +requeue +reraise +reserialized +resize +resized +resizing +rtype +runlevel +runtime +sNaN +screenshot +screenshots +semipredicate +semipredicates +serializable +serialized +serializer +serializers +serializes +serializing +sharded +sharding +signalled +spamming +starargs +starmap +stateful +statsd +stderr +stdlib +stdout +subclassed +subclasses +subclassing +submodule +subobjects +subpackages +subtask +subtasks +supervisord +symlink +symlinked +symlinks +taskset +timedelta +timestamp +timezones +tracebacks +truthy +tuple +tuples +uid +umask +unacked +undeliverable +unencrypted +unlink +unlinked +unlinks +unmanaged +unorderable +unpickleable +unpickled +unregister +unrepresentable +unroutable +untrusted +urlsafe +username +usernames +utcoffset +utils +versa +versioning +wbits +weakref +weakrefs +webhook +webhooks +wiki +wildcard +writable +yaml +Ádám +Łukasz diff --git a/docs/userguide/agents.rst b/docs/userguide/agents.rst index b5abdc41c..f699894a4 100644 --- a/docs/userguide/agents.rst +++ b/docs/userguide/agents.rst @@ -78,14 +78,14 @@ partitions so that every agent receives a specific portion of the stream. amongst a cluster of workers, and you can always repartition that stream later should you need to access data in a table or similar. -.. 
admonition:: Fault tolerancy +.. admonition:: Fault tolerance If the worker for a partition fails, or is blocked from the network for some reason, there is no need to worry because Kafka solves this by moving the partition to a worker that works. Faust also takes advantage of "standby tables" and a custom partition - manager that prefes to promote any node with a full copy of the data, + manager that prefers to promote any node with a full copy of the data, saving startup time and ensuring availability. Here's a complete example of an app, having an agent that adds numbers: @@ -405,7 +405,7 @@ items in the stream concurrently (and in no particular order). .. warning:: Concurrent instances of an agent will process the stream out-of-order, - so you aren't allowed to mutate :ref:`tables ` + so you cannot mutate :ref:`tables ` from within the agent function: An agent having `concurrency > 1`, can only read from a table, never write. @@ -525,7 +525,7 @@ simply because it violates ""exactly-once" semantics". Crashing the instance to require human intervention is certainly a choice, but far from ideal considering how common mistakes - in code or unhandled exceptions are. It may be better to log + in code or unexpected exceptions are. It may be better to log the error and have ops replay and reprocess the stream on notification. @@ -586,7 +586,7 @@ into agents while at the same time gathering the results. ``map`` streams results as they come in (out-of-order), and ``join`` waits until all the steps are complete (back-to-order) and return the results -in a list with orering preserved: +in a list with order preserved: ``map(values: Union[AsyncIterable[V], Iterable[V]])`` Map takes an async iterable, or a regular iterable, and returns an async diff --git a/docs/userguide/application.rst b/docs/userguide/application.rst index edd58b12c..94e6974e0 100644 --- a/docs/userguide/application.rst +++ b/docs/userguide/application.rst @@ -71,8 +71,8 @@ Here we set the broker URL to Kafka, and the storage driver to `RocksDB`_: ... store='rocksdb://', ... ) -"kafka://localhost" is used if you don't configure a broker URL. -The first part of the URL ("kafka://"), is called the scheme and specifies +``kafka://localhost`` is used if you don't configure a broker URL. +The first part of the URL (``kafka://``), is called the scheme and specifies the driver that you want to use (it can also be the fully qualified path to a Python class). @@ -187,8 +187,8 @@ Topic Arguments + ``acks``: :class:`bool` - Enable automatic acknowledgement for this topic. If you disable this - then you are responsible for manually acknowleding each event. + Enable automatic acknowledgment for this topic. If you disable this + then you are responsible for manually acknowledging each event. + ``internal``: :class:`bool` @@ -255,7 +255,7 @@ Channel Arguments + ``key_serializer``/``value_serializer``: :data:`~faust.types.codecs.CodecArg` - The codec/serializer type used for keys and values in this channell + The codec/serializer type used for keys and values in this channel. If not specified the default will be taken from the :setting:`key_serializer` and :setting:`value_serializer` settings. @@ -446,7 +446,7 @@ used by each agent: │ @myagent │ stream-example-examples.agent.myagent │ Example agent. 
│ └──────────┴───────────────────────────────────────┴────────────────┘ -The agent reads from the "stream-example-examples.agent.myagent" topic, whose +The agent reads from the ``stream-example-examples.agent.myagent`` topic, whose name is generated from the application :setting:`id` setting, the application :setting:`version` setting, and the fully qualified path of the agent (``examples.agent.myagent``). @@ -969,11 +969,12 @@ Starting the App ================ You can start a worker instance for your app from the command-line, or you can -start it inline in your Python process. To accomodate the many ways you may -want to embed a Faust application, starting the app have several possible entrypoints: +start it inline in your Python process. To accommodate the many ways you may +want to embed a Faust application, starting the app has several +possible entry points: -*App entrypoints*: +*App entry points*: 1) :program:`faust worker` @@ -987,7 +988,7 @@ want to embed a Faust application, starting the app have several possible entryp app.main() For packages you can add a ``__main__.py`` module or setuptools - entrypoints to ``setup.py``. + entry points to ``setup.py``. If you have the module name where an app is defined, you can start a worker for it with the :option:`faust -A` option: @@ -1012,7 +1013,7 @@ want to embed a Faust application, starting the app have several possible entryp It is responsible for: - Parsing the command-line arguments supported by :program:`faust worker`. - - Printing the banner box (you will not get that with entrypoint 3 or 4). + - Printing the banner box (you will not get that with entry point 3 or 4). - Starting the :class:`faust.Worker` (see next step). 3) -> :class:`faust.Worker` @@ -1026,7 +1027,7 @@ want to embed a Faust application, starting the app have several possible entryp sense given that Faust is built out of many different :pypi:`mode` services starting in a particular order. - The :class:`faust.Worker` entrypoint is responsible for: + The :class:`faust.Worker` entry point is responsible for: - Changing the directory when the ``workdir`` argument is set. @@ -1037,7 +1038,7 @@ want to embed a Faust application, starting the app have several possible entryp - If :option:`--debug ` is enabled: - - Starting the :pypi:`aiomonitor` debugging backdoor. + - Starting the :pypi:`aiomonitor` debugging back door. - Starting the blocking detector. @@ -1127,7 +1128,7 @@ want to embed a Faust application, starting the app have several possible entryp loop.ensure_future(app.start()) If your program is written as a set of :pypi:`Mode` services, you can - simply add the app as a depdendency to your service: + simply add the app as a dependency to your service: .. sourcecode:: python @@ -1251,10 +1252,10 @@ If you want more careful control you can specify a list of modules to traverse i See also :ref:`project-layout-django`. -Problem: Entrypoint -~~~~~~~~~~~~~~~~~~~ +Problem: Entry Point +~~~~~~~~~~~~~~~~~~~~ -The :file:`proj/__main__.py` module can act as the entrypoint for this +The :file:`proj/__main__.py` module can act as the entry point for this project: .. sourcecode:: python @@ -1307,7 +1308,7 @@ setuptools to install a command-line program for your project. For inspiration you can also look to the `setup.py` files in the :pypi:`faust` and :pypi:`mode` source code distributions. -2) Add the command as a setuptools entrypoint. +2) Add the command as a setuptools entry point. 
To your :file:`setup.py` add the following argument: @@ -1325,7 +1326,7 @@ setuptools to install a command-line program for your project. This essentially defines that the ``proj`` program runs `from proj.app import main` -3) Install your package using setup.py or :program:`pip`. +3) Install your package using ``setup.py`` or :program:`pip`. When developing your project locally you should use ``setup.py develop`` to use the source code directory as a Python package: diff --git a/docs/userguide/cli.rst b/docs/userguide/cli.rst index 59d4e7959..19155af58 100644 --- a/docs/userguide/cli.rst +++ b/docs/userguide/cli.rst @@ -49,11 +49,12 @@ decorator (see :ref:`tasks-cli-commands`). .. cmdoption:: --loop, -L - Event loop implementation to use: aio (default), gevent, uvloop. + Event loop implementation to use: ``aio`` (default), ``gevent``, + ``uvloop``. .. admonition:: Why is ``examples/word_count.py`` used as the program? - The convention for Faust projects is to define an entrypoint for + The convention for Faust projects is to define an entry point for the Faust command using ``app.main()`` - see :ref:`application-main` to see how to do so. @@ -74,10 +75,10 @@ decorator (see :ref:`tasks-cli-commands`). from package.app import app app.main() - Or use setuptools entrypoints so that ``pip install myproj`` installs + Or use setuptools entry points so that ``pip install myproj`` installs a command-line program. - Even if you don't add an entrypoint you can always use the + Even if you don't add an entry point you can always use the :program:`faust` program by specifying the path to an app. Either the name of a module having an ``app`` attribute: diff --git a/docs/userguide/settings.rst b/docs/userguide/settings.rst index e8010b436..ba3f7ff6a 100644 --- a/docs/userguide/settings.rst +++ b/docs/userguide/settings.rst @@ -113,7 +113,7 @@ The default is to not use any authentication. SASL Authentication ~~~~~~~~~~~~~~~~~~~ -You can enable SASL authentication via plaintext: +You can enable SASL authentication via plain text: .. sourcecode:: python @@ -220,7 +220,7 @@ preferred. :type: ``str`` :default: ``URL("memory://")`` -Optional backend used for memcached-style caching. +Optional backend used for Memcached-style caching. URL can be: ``redis://host``, ``rediscluster://host``, or ``memory://``. .. setting:: processing_guarantee @@ -293,7 +293,7 @@ The value for this argument can be: faust -A example.simple worker The :option:`-A `, option specifies the app, but you can also - create a shortcut entrypoint entrypoint by calling ``app.main()``: + create a shortcut entry point by calling ``app.main()``: .. sourcecode:: python @@ -339,7 +339,8 @@ False) app = App(..., autodiscover=True) It will find agents and other decorators in all of the reusable Django - apps. If you want to manually control what packages are traversed, then provide + applications. If you want to manually control what packages are + traversed, then provide a list:: app = App(..., autodiscover=['package1', 'package2']) @@ -466,7 +467,7 @@ The reverse path used to find the app, for example if the app is located in:: Then the ``origin`` should be ``"myproj.app"``. The :program:`faust worker` program will try to automatically set the origin, -but if you are having problems with autogenerated names then you can set +but if you are having problems with auto generated names then you can set origin manually. @@ -580,7 +581,7 @@ Advanced Broker Settings :type: ``str`` :default: ``f"faust-{VERSION}"`` -You shouldn't have to set this manually. 
+There is rarely any reason to configure this setting. The client id is used to identify the software used, and is not usually configured by the user. @@ -593,7 +594,7 @@ configured by the user. .. versionadded:: 1.4.0 :type: :class:`int` -:default: ``40.0`` (fourty seconds) +:default: ``40.0`` (forty seconds) Kafka client request timeout. @@ -625,7 +626,7 @@ How often we commit messages that have been fully processed (:term:`acked`). ``broker_commit_livelock_soft_timeout`` --------------------------------------- -:type: class:`float`, :class:`~datetime.timedelta` +:type: :class:`float`, :class:`~datetime.timedelta` :default: ``300.0`` (five minutes) How long it takes before we warn that the Kafka commit offset has @@ -760,7 +761,7 @@ Advanced Producer Settings :default: ``None`` The compression type for all data generated by the producer. Valid values are -'gzip', 'snappy', 'lz4', or None. +`gzip`, `snappy`, `lz4`, or :const:`None`. .. setting:: producer_linger_ms @@ -811,7 +812,7 @@ durability of records that are sent. The following settings are common: * ``0``: Producer will not wait for any acknowledgment from the server at all. The message will immediately be considered sent. (Not recommended) * ``1``: The broker leader will write the record to its local log but will - respond without awaiting full acknowledgement from all followers. In this + respond without awaiting full acknowledgment from all followers. In this case should the leader fail immediately after acknowledging the record but before the followers have replicated it then the record will be lost. * ``-1``: The broker leader will wait for the full set of in-sync replicas to @@ -919,7 +920,7 @@ Advanced Stream Settings :type: :class:`int` :default: 4096 -This setting control backpressure to streams and agents reading from streams. +This setting controls back pressure to streams and agents reading from streams. If set to 4096 (default) this means that an agent can only keep at most 4096 unprocessed items in the stream buffer. @@ -931,7 +932,7 @@ sends messages or update tables (which sends changelog messages). This means that if the buffer size is large, the :setting:`broker_commit_interval` or :setting:`broker_commit_every` settings -must be set to commit frequently, avoiding backpressure from building up. +must be set to commit frequently, avoiding back pressure from building up. A buffer size of 131_072 may let you process over 30,000 events a second as a baseline, but be careful with a buffer size that large when you also @@ -976,7 +977,7 @@ streams are idempotent you can disable it using this setting. :default: :const:`False` If enabled we buffer up sending messages until the -source topic offset related to that processsing is committed. +source topic offset related to that processing is committed. This means when we do commit, we may have buffered up a LOT of messages so commit needs to happen frequently (make sure to decrease :setting:`broker_commit_every`). @@ -1268,8 +1269,9 @@ at startup. :type: ``Union[float, datetime.timedelta]`` :default: ``timedelta(days=1)`` -The expiry time (in seconds float, or timedelta), for how long replies -will stay in the instances local reply topic before being removed. +The expiry time (in seconds :class:`float`, or :class:`~datetime.timedelta`), +for how long replies will stay in the instance's local reply topic +before being removed. .. 
setting:: reply_to_prefix diff --git a/docs/userguide/streams.rst b/docs/userguide/streams.rst index 28c1f6d1b..6e0df75f3 100644 --- a/docs/userguide/streams.rst +++ b/docs/userguide/streams.rst @@ -51,7 +51,7 @@ iterate over key/value pairs (using Keys and values can be bytes for manual deserialization, or :class:`~faust.Model` instances, and this is decided by the topic's ``key_type`` and -``value_type`` argumetns. +``value_type`` arguments. .. seealso:: @@ -77,7 +77,7 @@ topic and iterate over it: print(w.amount) Do note that the worker must be started first (or at least the app), -for this to work, and the stream iterater probably needs to be started +for this to work, and the stream iterator needs to be started as an :class:`asyncio.Task`, so a more practical example is: .. sourcecode:: python @@ -151,8 +151,8 @@ reduction after all processors are applied: # value = add_default_language(add_client_info(value)) -Message Lifecycle -================= +Message Life Cycle +================== Kafka Topics ------------ @@ -289,8 +289,8 @@ rather start individual tasks: Operations ========== -``group_by()`` -- Repartiton the stream ---------------------------------------- +``group_by()`` -- Repartition the stream +---------------------------------------- The :meth:`Stream.group_by() ` method repartitions the stream by taking a "key type" as argument: @@ -376,7 +376,7 @@ Note that this changes the type of what you iterate over from ``Stream`` to Use :meth:`Stream.events() ` to iterate over raw ``Event`` values, including access to original message payload and message -metadata: +meta data: .. sourcecode:: python diff --git a/docs/userguide/tables.rst b/docs/userguide/tables.rst index 8388dda52..308110d65 100644 --- a/docs/userguide/tables.rst +++ b/docs/userguide/tables.rst @@ -73,7 +73,7 @@ streams must share shards. To shard the table differently, you must first repartition the stream using :class:`~@Stream.group_by`. -Repartion a stream: +Repartition a stream: .. sourcecode:: python @@ -132,7 +132,7 @@ partitioned by country name, is partitioned by the user ID. In practice, this means that data for a country may reside on multiple partitions, and worker instances end up with incomplete data. -To fix that reimplement your program like this, using two distinct agents +To fix that rewrite your program like this, using two distinct agents and repartition the stream by country when populating the table: .. sourcecode:: python diff --git a/docs/userguide/tasks.rst b/docs/userguide/tasks.rst index 236223493..1e2c19028 100644 --- a/docs/userguide/tasks.rst +++ b/docs/userguide/tasks.rst @@ -60,7 +60,7 @@ something every minute. Cron Jobs ========= -A cron job is a task that executes according to a crontab format, +A Cron job is a task that executes according to a Crontab format, usually at fixed times: .. sourcecode:: python @@ -70,7 +70,7 @@ usually at fixed times: print('WAKE UP ONCE A DAY') -After starting the worker, and it's operational, the above cron job will print +After starting the worker, and it's operational, the above Cron job will print something every day at 8pm. ``crontab`` takes 1 mandatory argument ``cron_format`` and 2 optional arguments: @@ -139,7 +139,7 @@ Restart your Faust worker, and you can visit your new page at: http://localhost:6066/count/ Your workers may have an arbitrary number of views, and it's up to you what -they provide. Just like other web apps they can communicate with Redis, +they provide. 
Just like other web applications they can communicate with Redis, SQL databases, and so on. Anything you want, really, and it's executing in an asynchronous event loop. @@ -296,7 +296,7 @@ Custom CLI Commands ------------------- To add a custom command to your app, see the :file:`examples/simple.py` -example in the Faust distribution, where we've added a ``produce`` command +example in the Faust distribution, where we added a ``produce`` command used to send example data into the stream processors: .. sourcecode:: python diff --git a/docs/userguide/workers.rst b/docs/userguide/workers.rst index dcef463e9..55f1561f0 100644 --- a/docs/userguide/workers.rst +++ b/docs/userguide/workers.rst @@ -16,7 +16,7 @@ Starting the worker .. sidebar:: Daemonizing You probably want to use a daemonization tool to start - the worker in the background. Use systemd, supervisord or + the worker in the background. Use `systemd`, `supervisord` or any of the tools you usually use to start services. We hope to have a detailed guide for each of these soon. @@ -82,7 +82,7 @@ again by another worker. .. admonition:: Starting subprocesses For Faust applications that start subprocesses as a side - effect of processsing the stream, you should know that the "double-fork" + effect of processing the stream, you should know that the "double-fork" problem on Unix means that the worker will not be able to reap its children when killed using the :sig:`KILL` signal. diff --git a/examples/advanced/isolated_partitions_crashing.py b/examples/advanced/isolated_partitions_crashing.py index b91dac17a..a37b9c4ea 100755 --- a/examples/advanced/isolated_partitions_crashing.py +++ b/examples/advanced/isolated_partitions_crashing.py @@ -1,8 +1,8 @@ #!/usr/bin/env python """Withdrawal example. -Quickstart -========== +Quick Start +=========== 1) Start worker: diff --git a/examples/django/README.rst b/examples/django/README.rst index 8e4fc00e7..4d094484e 100644 --- a/examples/django/README.rst +++ b/examples/django/README.rst @@ -7,7 +7,7 @@ Directory Layout We have also added a ``proj/__main__.py`` that executes if you do ``python -m proj``, and it will work as the manage.py for the project. - This is also installed by setup.py as an entrypoint, so after + This is also installed by setup.py as an entry point, so after ``python setup.py install`` or ``python setup.py develop`` the ``proj`` command will be available:: diff --git a/examples/django/faustapp/__main__.py b/examples/django/faustapp/__main__.py index e0afedc38..73f6c36ce 100644 --- a/examples/django/faustapp/__main__.py +++ b/examples/django/faustapp/__main__.py @@ -1,2 +1,2 @@ from faustapp.app import app -app.main() # start entrypoint for :program:`faust` +app.main() # start entry point for :program:`faust` diff --git a/examples/withdrawals.py b/examples/withdrawals.py index a096cd063..096a7b67c 100755 --- a/examples/withdrawals.py +++ b/examples/withdrawals.py @@ -1,8 +1,8 @@ #!/usr/bin/env python """Withdrawal example. 
-Quickstart -========== +Quick Start +=========== 1) Start worker: diff --git a/faust/__main__.py b/faust/__main__.py index f95374a3a..6d3aa93b6 100644 --- a/faust/__main__.py +++ b/faust/__main__.py @@ -1,4 +1,4 @@ -"""Command-line entrypoint.""" +"""Command-line entry point.""" # pragma: no cover from faust.cli.faust import cli diff --git a/faust/app/base.py b/faust/app/base.py index 1a0335e3c..4b481c7ef 100644 --- a/faust/app/base.py +++ b/faust/app/base.py @@ -501,7 +501,7 @@ def _init_fixups(self) -> MutableSequence[FixupT]: # One example is the Django fixup, responsible for Django integration # whenever the DJANGO_SETTINGS_MODULE environment variable is # set. See faust/fixups/django.py, it's not complicated - using - # setuptools entrypoints you can very easily create extensions that + # setuptools entry points you can very easily create extensions that # are automatically enabled by installing a PyPI package with # `pip install myname`. return list(fixups(self)) @@ -606,7 +606,7 @@ def finalize(self) -> None: # If you access configuration before an explicit call to # ``app.finalize()`` you will get an error. - # The ``app.main`` entrypoint and the ``faust -A app`` command + # The ``app.main`` entry point and the ``faust -A app`` command # both will automatically finalize the app for you. if not self.finalized: self.finalized = True @@ -922,26 +922,26 @@ def crontab(self, cron_format: str, *, timezone: tzinfo = None, on_leader: bool = False, traced: bool = True) -> Callable: - """Define periodic task using crontab description. + """Define periodic task using Crontab description. This is an ``async def`` function to be run at the fixed times, - defined by the cron format. + defined by the Cron format. Like :meth:`timer`, but executes at fixed times instead of executing at certain intervals. This decorator takes an async function and adds it to a - list of cronjobs started with the app. + list of Cronjobs started with the app. Arguments: - cron_format: The cron spec defining fixed times to run the + cron_format: The Cron spec defining fixed times to run the decorated function. Keyword Arguments: - timezone: The timezone to be taken into account for the cron jobs. + timezone: The timezone to be taken into account for the Cron jobs. If not set value from :setting:`timezone` will be taken. - on_leader: Should the cron job only run on the leader? + on_leader: Should the Cron job only run on the leader? Example: >>> @app.crontab(cron_format='30 18 * * *', diff --git a/faust/assignor/cluster_assignment.py b/faust/assignor/cluster_assignment.py index de09b9995..e52084afc 100644 --- a/faust/assignor/cluster_assignment.py +++ b/faust/assignor/cluster_assignment.py @@ -1,4 +1,4 @@ -"""Cluster assignement.""" +"""Cluster assignment.""" from typing import List, MutableMapping, Sequence, Set, cast from faust.models import Record from .client_assignment import ( diff --git a/faust/assignor/partition_assignor.py b/faust/assignor/partition_assignor.py index 6b2a7ca60..f121a9cdc 100644 --- a/faust/assignor/partition_assignor.py +++ b/faust/assignor/partition_assignor.py @@ -60,9 +60,7 @@ class PartitionAssignor(AbstractPartitionAssignor, PartitionAssignorT): Further, this assignor needs to be sticky and potentially redundant Notes: - Interface copied from - https://github.com/dpkp/kafka-python/blob/master/ - kafka/coordinator/assignors/abstract.py + Interface copied from :mod:`kafka.coordinator.assignors.abstract`. 
""" _metadata: ClientMetadata diff --git a/faust/channels.py b/faust/channels.py index 15129057b..f39380122 100644 --- a/faust/channels.py +++ b/faust/channels.py @@ -58,7 +58,7 @@ class Channel(ChannelT): maxsize: The maximum number of messages this channel can hold. If exceeded any new ``put`` call will block until a message is removed from the channel. - loop: The asyncio event loop to use. + loop: The :mod:`asyncio` event loop to use. """ app: AppT diff --git a/faust/cli/base.py b/faust/cli/base.py index 894dc6365..9f2ce18e3 100644 --- a/faust/cli/base.py +++ b/faust/cli/base.py @@ -84,7 +84,7 @@ def option(*option_decls: Any, """Create command-line option. SeeAlso: - func:`click.option` + :func:`click.option` """ return click.option(*option_decls, show_default=show_default, **kwargs) @@ -383,7 +383,7 @@ def make_context(self, # This is the thing that app.main(), ``python -m faust -A ...``, # and ``faust -A ..`` calls (see also faust/__main__.py, and setup.py -# in the git repository (entrypoints).) +# in the git repository (entry points).) @click.group(cls=_Group) @_apply_options(builtin_options) @@ -520,7 +520,7 @@ def __init_subclass__(self, *args: Any, **kwargs: Any) -> None: @classmethod def parse(cls, argv: Sequence[str]) -> Mapping: - """Parse command-line arguments in argv' and return mapping.""" + """Parse command-line arguments in ``argv`` and return mapping.""" return cls._parse(argv, standalone_mode=False) @staticmethod @@ -679,7 +679,7 @@ def bold(self, text: str) -> str: return self.color('b', text) def bold_tail(self, text: str, *, sep: str = '.') -> str: - """Put bold emphasis on the last part of a foo.bar.baz string.""" + """Put bold emphasis on the last part of a ``foo.bar.baz`` string.""" head, fsep, tail = text.rpartition(sep) return fsep.join([head, self.bold(tail)]) diff --git a/faust/cli/completion.py b/faust/cli/completion.py index 7b81991fc..6fc9d2039 100644 --- a/faust/cli/completion.py +++ b/faust/cli/completion.py @@ -1,4 +1,7 @@ -"""completion - Command line utility for completion (bash, ksh, zsh, etc.).""" +"""completion - Command line utility for completion. + +Supports ``bash``, ``ksh``, ``zsh``, etc. +""" import os from pathlib import Path from .base import AppCommand @@ -12,7 +15,7 @@ class completion(AppCommand): - """Output shell completion to be eval'd by the shell.""" + """Output shell completion to be evaluated by the shell.""" require_app = False diff --git a/faust/events.py b/faust/events.py index 1c1664fd9..2c6b77fbe 100644 --- a/faust/events.py +++ b/faust/events.py @@ -38,7 +38,7 @@ class Event(EventT): event.message.offset - - Iteratiing over channels/topics yields Event: + - Iterating over channels/topics yields Event: async for event in channel: ... @@ -90,7 +90,7 @@ class Event(EventT): stream objects, so in the example: The best way to access the current_event in an agent is - to use the contextvar: + to use the :class:`~contextvars.ContextVar`: .. sourcecode:: python diff --git a/faust/serializers/codecs.py b/faust/serializers/codecs.py index e3a52612f..1b862fcc5 100644 --- a/faust/serializers/codecs.py +++ b/faust/serializers/codecs.py @@ -4,21 +4,21 @@ ================ * **raw** - No encoding/serialization (bytes only). -* **json** - json with utf-8 encoding. +* **json** - json with UTF-8 encoding. * **pickle** - pickle with base64 encoding (not urlsafe). * **binary** - base64 encoding (not urlsafe). 
 Serialization by name
 =====================
 
-The func:`dumps` function takes a codec name and the object to encode,
+The :func:`dumps` function takes a codec name and the object to encode,
 then returns bytes:
 
 .. sourcecode:: pycon
 
     >>> s = dumps('json', obj)
 
-For the reverse direction, the func:`loads` function takes a codec
+For the reverse direction, the :func:`loads` function takes a codec
 name and bytes to decode:
 
 .. sourcecode:: pycon
@@ -67,8 +67,8 @@ def _loads(self, s: bytes) -> Any:
     may also need to transfer this payload over a transport easily confused
     by binary data, such as JSON where everything is Unicode.
 
-You can chain codecs together, so to add a binary text encoding like base64,
-to ur codec, we use the ``|`` operator to form a combined codec:
+You can chain codecs together, so to add a binary text encoding like Base64
+to your codec, use the ``|`` operator to form a combined codec:
 
 .. sourcecode:: python
@@ -91,7 +91,7 @@ def msgpack() -> codecs.Codec:
     happens before we use the feature.
 
 Faust also supports registering *codec extensions*
-using setuptools entrypoints, so instead we can create an installable msgpack
+using setuptools entry points, so instead we can create an installable msgpack
 extension.
 
 To do so we need to define a package with the following directory layout:
@@ -301,7 +301,7 @@ def _dumps(self, s: bytes) -> bytes:
 
 
 def register(name: str, codec: CodecT) -> None:
-    """Register new codec in the codec registy."""
+    """Register new codec in the codec registry."""
     codecs[name] = codec
 
 
diff --git a/faust/serializers/registry.py b/faust/serializers/registry.py
index 758106b14..be4a61b0d 100644
--- a/faust/serializers/registry.py
+++ b/faust/serializers/registry.py
@@ -83,7 +83,7 @@ def loads_value(self,
 
         Arguments:
             typ: Model to use for deserialization.
-            value: Bytestring to deserialize.
+            value: bytes to deserialize.
 
             serializer: Codec to use for this value.
                 If not set the default will be used (:attr:`value_serializer`).
@@ -130,8 +130,8 @@ def dumps_key(self,
         """Serialize key.
 
         Arguments:
-            typ: Model hint (can also be str/bytes).
-                When `typ=str` or `bytes`, raw serializer is assumed.
+            typ: Model hint (can also be :class:`str`/:class:`bytes`).
+                When ``typ=str`` or :class:`bytes`, raw serializer is assumed.
             key: The key value to serializer.
 
             serializer: Codec to use for this key, if it is not a model type.
@@ -158,8 +158,8 @@ def dumps_value(self,
         """Serialize value.
 
         Arguments:
-            typ: Model hint (can also be str/bytes).
-                When `typ=str` or `bytes`, raw serializer is assumed.
+            typ: Model hint (can also be :class:`str`/:class:`bytes`).
+                When ``typ=str`` or :class:`bytes`, raw serializer is assumed.
             key: The value to serializer.
 
             serializer: Codec to use for this value, if it is not a model type.
diff --git a/faust/streams.py b/faust/streams.py
index 06c826f25..1db399e85 100644
--- a/faust/streams.py
+++ b/faust/streams.py
@@ -239,7 +239,7 @@ def clone(self, **kwargs: Any) -> StreamT:
         """Create a clone of this stream.
 
         Notes:
-            If the cloned stream is supposed to "supercede" this stream,
+            If the cloned stream is supposed to supersede this stream,
             like in ``group_by``/``through``/etc., you should use
             :meth:`_chain` instead so `stream._next = cloned_stream` is set
             and :meth:`get_active_stream` returns the cloned stream.
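The codec docstrings above cover two operations worth seeing end to end: serialization by name, and chaining codecs with the ``|`` operator. Below is a minimal sketch of both, assuming the public helpers in ``faust.serializers.codecs`` (``dumps``, ``loads``, ``get_codec`` and ``register``) behave as those docstrings describe:

.. sourcecode:: python

    from faust.serializers import codecs

    # Serialization by name: encode an object to bytes, then back again.
    payload = codecs.dumps('json', {'account': 'foo', 'amount': 3})
    assert codecs.loads('json', payload) == {'account': 'foo', 'amount': 3}

    # Chaining: combine the json codec with the Base64 binary codec
    # using the | operator, then register the result under a new name.
    json_b64 = codecs.get_codec('json') | codecs.get_codec('binary')
    codecs.register('json_b64', json_b64)

    encoded = codecs.dumps('json_b64', {'n': 1})  # JSON first, then Base64
    assert codecs.loads('json_b64', encoded) == {'n': 1}

Once registered, the combined codec can be referred to by name (e.g. ``serializer='json_b64'``) anywhere the bundled codecs can.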
diff --git a/faust/tables/objects.py b/faust/tables/objects.py
index 87044e5e4..40c3864bf 100644
--- a/faust/tables/objects.py
+++ b/faust/tables/objects.py
@@ -28,7 +28,7 @@
 
 class ChangeloggedObject:
-    """A changlogged object in a :class:`ChangeloggedObjectManager` store."""
+    """A changelogged object in a :class:`ChangeloggedObjectManager` store."""
 
     manager: 'ChangeloggedObjectManager'
 
diff --git a/faust/tables/recovery.py b/faust/tables/recovery.py
index 5b891d705..482d836a3 100644
--- a/faust/tables/recovery.py
+++ b/faust/tables/recovery.py
@@ -49,31 +49,31 @@ class Recovery(Service):
 
     stats_interval: float = 5.0
 
-    #: Set of standby tps.
+    #: Set of standby topic partitions.
     standby_tps: Set[TP]
 
-    #: Set of active tps.
+    #: Set of active topic partitions.
     active_tps: Set[TP]
 
     actives_for_table: MutableMapping[CollectionT, Set[TP]]
     standbys_for_table: MutableMapping[CollectionT, Set[TP]]
 
-    #: Mapping from TP to table
+    #: Mapping from topic partition to table
     tp_to_table: MutableMapping[TP, CollectionT]
 
-    #: Active offset by TP.
+    #: Active offset by topic partition.
     active_offsets: Counter[TP]
 
-    #: Standby offset by TP.
+    #: Standby offset by topic partition.
     standby_offsets: Counter[TP]
 
-    #: Mapping of highwaters by tp.
+    #: Mapping of highwaters by topic partition.
     highwaters: Counter[TP]
 
-    #: Active highwaters by TP.
+    #: Active highwaters by topic partition.
     active_highwaters: Counter[TP]
 
-    #: Standby highwaters by TP.
+    #: Standby highwaters by topic partition.
     standby_highwaters: Counter[TP]
 
     _signal_recovery_start: Optional[Event] = None
@@ -90,7 +90,7 @@ class Recovery(Service):
     #: and need to be flushed before starting new recovery/stopping.
     buffers: MutableMapping[CollectionT, List[EventT]]
 
-    #: Cache of buffer size by TopicPartitiojn.
+    #: Cache of buffer size by topic partition.
     buffer_sizes: MutableMapping[TP, int]
 
     _recovery_span: Optional[opentracing.Span] = None
diff --git a/faust/topics.py b/faust/topics.py
index 949667e83..91efb0978 100644
--- a/faust/topics.py
+++ b/faust/topics.py
@@ -74,8 +74,9 @@ class Topic(Channel, TopicT):
             declared, and ``autoCreateTopics`` is enabled
             on the Kafka Server, the number of partitions used will be
             specified by the server configuration.
-        retention: Number of seconds (as float/timedelta) to keep messages
-            in the topic before they can be expired by the server.
+        retention: Number of seconds (as float/:class:`~datetime.timedelta`)
+            to keep messages in the topic before they can
+            be expired by the server.
         pattern: Regular expression evaluated to decide what topics to
             subscribe to. You cannot specify both topics and a pattern.
         key_type: How to deserialize keys for messages in this topic.
@@ -271,7 +272,7 @@ def derive(self, **kwargs: Any) -> ChannelT:
         """Create new :class:`Topic` derived from this topic.
 
         Configuration will be copied from this topic, but any parameter
-        overriden as a keyword argument.
+        overridden as a keyword argument.
 
         See Also:
             :meth:`derive_topic`: for a list of supported keyword arguments.
diff --git a/faust/transport/base.py b/faust/transport/base.py
index 454f1addb..7c918aba1 100644
--- a/faust/transport/base.py
+++ b/faust/transport/base.py
@@ -6,7 +6,7 @@
 
 - Creates new consumers/producers.
 To see a reference transport implementation go to:
-faust/transport/drivers/aiokafka.py
+:file:`faust/transport/drivers/aiokafka.py`
 """
 import asyncio
 from typing import Any, ClassVar, List, Type
diff --git a/faust/transport/conductor.py b/faust/transport/conductor.py
index d109b61aa..2696eef31 100644
--- a/faust/transport/conductor.py
+++ b/faust/transport/conductor.py
@@ -47,7 +47,7 @@ class _Topic: ...  # noqa
 
 
 class ConductorCompiler:  # pragma: no cover
-    """Pregenerate a function to handle the messages for a topic+partition."""
+    """Compile a function to handle the messages for a topic+partition."""
 
     def build(self,
               conductor: 'Conductor',
diff --git a/faust/transport/consumer.py b/faust/transport/consumer.py
index 5de2afcfa..cef25f1de 100644
--- a/faust/transport/consumer.py
+++ b/faust/transport/consumer.py
@@ -18,13 +18,13 @@
     + Messages are reference counted, and the Conductor increases
       the reference count to the number of subscribed streams.
 
-    + Stream.__aiter__ is set up in a way such that when what is iterating
-      over the stream is finished with the message, a finally: block will
-      decrease the reference count by one.
+    + ``Stream.__aiter__`` is set up in a way such that when whatever is
+      iterating over the stream is finished with the message, a
+      ``finally:`` block will decrease the reference count by one.
 
     + When the reference count for a message hits zero, the stream will
-      call ``Consumer.ack(message)``, which will mark that tp+offset
-      combination as "commitable"
+      call ``Consumer.ack(message)``, which will mark that topic
+      partition + offset combination as "committable".
 
     + If all the streams share the same key_type/value_type,
       the conductor will only deserialize the payload once.
@@ -35,7 +35,7 @@
       offset.
 
     - If the consumer marked an offset as committable this thread
-      will advance the comitted offset.
+      will advance the committed offset.
+ To find the offset that it can safely advance to the commit thread will traverse the _acked mapping of TP to list of acked offsets, by @@ -854,11 +854,11 @@ async def _commit_offsets(self, offsets: Mapping[TP, int], ) self.log.dev('COMMITTING OFFSETS:\n%s', table) assignment = self.assignment() - commitable_offsets: Dict[TP, int] = {} + committable_offsets: Dict[TP, int] = {} revoked: Dict[TP, int] = {} for tp, offset in offsets.items(): if tp in assignment: - commitable_offsets[tp] = offset + committable_offsets[tp] = offset else: revoked[tp] = offset if revoked: @@ -867,25 +867,25 @@ async def _commit_offsets(self, offsets: Mapping[TP, int], 'will be eventually processed again: %r', revoked, ) - if not commitable_offsets: + if not committable_offsets: return False with flight_recorder(self.log, timeout=300.0) as on_timeout: did_commit = False on_timeout.info('+consumer.commit()') if self.in_transaction: did_commit = await self.transactions.commit( - commitable_offsets, + committable_offsets, start_new_transaction=start_new_transaction, ) else: - did_commit = await self._commit(commitable_offsets) + did_commit = await self._commit(committable_offsets) on_timeout.info('-consumer.commit()') if did_commit: on_timeout.info('+tables.on_commit') - self.app.tables.on_commit(commitable_offsets) + self.app.tables.on_commit(committable_offsets) on_timeout.info('-tables.on_commit') - self._committed_offset.update(commitable_offsets) - self.app.monitor.on_tp_commit(commitable_offsets) + self._committed_offset.update(committable_offsets) + self.app.monitor.on_tp_commit(committable_offsets) self._last_batch = None return did_commit diff --git a/faust/types/settings.py b/faust/types/settings.py index e901b0aad..861e72596 100644 --- a/faust/types/settings.py +++ b/faust/types/settings.py @@ -93,10 +93,10 @@ class _WorkerT: ... # noqa #: Table storage URL, used as default for :setting:`store`. STORE_URL = 'memory://' -#: Cache storage URL, used as default for setting:`cache`. +#: Cache storage URL, used as default for :setting:`cache`. CACHE_URL = 'memory://' -#: Web driver URL, used as default for setting:`web`. +#: Web driver URL, used as default for :setting:`web`. WEB_URL = 'aiohttp://' PROCESSING_GUARANTEE = ProcessingGuarantee.AT_LEAST_ONCE @@ -204,7 +204,7 @@ class _WorkerT: ... # noqa STREAM_RECOVERY_DELAY = 3.0 #: We buffer up sending messages until the -#: source topic offset related to that processsing is committed. +#: source topic offset related to that processing is committed. #: This means when we do commit, we may have buffered up a LOT of messages #: so commit frequently. #: @@ -252,7 +252,7 @@ class _WorkerT: ... # noqa #: at all. The message will immediately be considered sent. #: (Not recommended) #: 1: The broker leader will write the record to its local log but -#: will respond without awaiting full acknowledgement from all +#: will respond without awaiting full acknowledgment from all #: followers. In this case should the leader fail immediately #: after acknowledging the record but before the followers have #: replicated it then the record will be lost. diff --git a/faust/types/transports.py b/faust/types/transports.py index 400add13e..a76f8c6a9 100644 --- a/faust/types/transports.py +++ b/faust/types/transports.py @@ -432,7 +432,7 @@ class TransportT(abc.ABC): url: List[URL] #: String identifying the underlying driver used for this transport. - #: E.g. for :pypi:`aiokafka` this could be "aiokafka 0.4.1". + #: E.g. for :pypi:`aiokafka` this could be ``aiokafka 0.4.1``. 
driver_version: str loop: asyncio.AbstractEventLoop diff --git a/faust/utils/cron.py b/faust/utils/cron.py index c907a81e1..4b5a5f8b1 100644 --- a/faust/utils/cron.py +++ b/faust/utils/cron.py @@ -5,7 +5,7 @@ def secs_for_next(cron_format: str, tz: tzinfo = None) -> float: - """Return seconds until next execution given crontab style format.""" + """Return seconds until next execution given Crontab style format.""" now_ts = time.time() # If we have a tz object we'll make now timezone aware, and # if not will set now to be the current timestamp (tz diff --git a/faust/utils/tracing.py b/faust/utils/tracing.py index 71e941a7d..65133b42c 100644 --- a/faust/utils/tracing.py +++ b/faust/utils/tracing.py @@ -64,7 +64,7 @@ def operation_name_from_fun(fun: Any) -> str: def traced_from_parent_span(parent_span: opentracing.Span = None, **extra_context: Any) -> Callable: - """Decorate fucntion to be traced from parent span.""" + """Decorate function to be traced from parent span.""" def _wrapper(fun: Callable, **more_context: Any) -> Callable: operation_name = operation_name_from_fun(fun) @wraps(fun) diff --git a/faust/web/drivers/aiohttp.py b/faust/web/drivers/aiohttp.py index c01924c76..311a9040d 100644 --- a/faust/web/drivers/aiohttp.py +++ b/faust/web/drivers/aiohttp.py @@ -77,7 +77,7 @@ async def on_stop(self) -> None: class Web(base.Web): - """Web server and framework implemention using :pypi:`aiohttp`.""" + """Web server and framework implementation using :pypi:`aiohttp`.""" driver_version = f'aiohttp={aiohttp_version}' handler_shutdown_timeout: float = 60.0 diff --git a/requirements/docs-plugins.txt b/requirements/docs-plugins.txt new file mode 100644 index 000000000..e6b043b35 --- /dev/null +++ b/requirements/docs-plugins.txt @@ -0,0 +1,4 @@ +alabaster +sphinx_autodoc_annotation +sphinx_celery>=1.4.7,<2.0 +sphinxcontrib-asyncio diff --git a/requirements/docs.txt b/requirements/docs.txt index 868e539cf..a26aa2526 100644 --- a/requirements/docs.txt +++ b/requirements/docs.txt @@ -1,5 +1,2 @@ -alabaster +-r docs-plugins.txt sphinx>=2.0,<3.0 -sphinx_autodoc_annotation -sphinx_celery>=1.4.7,<2.0 -sphinxcontrib-asyncio diff --git a/requirements/spell.txt b/requirements/spell.txt new file mode 100644 index 000000000..8b7308e9d --- /dev/null +++ b/requirements/spell.txt @@ -0,0 +1,3 @@ +-r docs-plugins.txt +sphinx<2.0 +sphinxcontrib-spelling diff --git a/t/unit/transport/test_consumer.py b/t/unit/transport/test_consumer.py index 1b67590be..c31cd1dbd 100644 --- a/t/unit/transport/test_consumer.py +++ b/t/unit/transport/test_consumer.py @@ -771,9 +771,9 @@ async def test_commit_tps__ProducerSendError(self, *, consumer): consumer.crash.assert_called_once_with(exc) @pytest.mark.asyncio - async def test_commit_tps__no_commitable(self, *, consumer): - consumer._filter_commitable_offsets = Mock(name='filt') - consumer._filter_commitable_offsets.return_value = {} + async def test_commit_tps__no_committable(self, *, consumer): + consumer._filter_committable_offsets = Mock(name='filt') + consumer._filter_committable_offsets.return_value = {} await consumer._commit_tps( {TP1, TP2}, start_new_transaction=True, @@ -866,7 +866,7 @@ async def test_commit_offsets__in_transaction(self, *, consumer): assert ret is consumer.transactions.commit.coro() @pytest.mark.asyncio - async def test_commit_offsets__no_commitable_offsets(self, *, consumer): + async def test_commit_offsets__no_committable_offsets(self, *, consumer): consumer.current_assignment.clear() assert not await consumer._commit_offsets({ TP1: 3003, diff --git 
a/tox.ini b/tox.ini
index 25b613aa4..79e9598df 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
 [tox]
-envlist = 3.7,3.6,flake8,apicheck,configcheck,typecheck,docstyle,bandit
+envlist = 3.7,3.6,flake8,apicheck,configcheck,typecheck,docstyle,bandit,spell
 
 [testenv]
 deps=
@@ -8,6 +8,7 @@ deps=
     -r{toxinidir}/requirements/ci.txt
 
     linkcheck,apicheck,configcheck: -r{toxinidir}/requirements/docs.txt
+    spell: -r{toxinidir}/requirements/spell.txt
     flake8,docstyle: -r{toxinidir}/requirements/dist.txt
     bandit: bandit
 passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
@@ -16,7 +17,7 @@ recreate = False
 commands = py.test --random-order --open-files -xvv --cov=faust t/unit t/functional t/integration t/meticulous/ t/regression
 basepython =
-    3.7,flake8,apicheck,linkcheck,configcheck,typecheck,docstyle,bandit: python3.7
+    3.7,flake8,apicheck,linkcheck,configcheck,typecheck,docstyle,bandit,spell: python3.7
     3.6: python3.6
 
 [testenv:apicheck]
@@ -34,6 +35,12 @@ commands =
 commands =
     sphinx-build -W -b linkcheck -d {envtmpdir}/doctrees2 docs docs/_build/linkcheck
 
+[testenv:spell]
+setenv =
+    SPELLCHECK=1
+commands =
+    sphinx-build -W -b spelling -d {envtmpdir}/doctrees2 docs docs/_build/spell
+
 [testenv:flake8]
 commands =
     flake8 {toxinidir}/faust {toxinidir}/t {toxinidir}/examples
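With the new ``spell`` environment wired into ``envlist`` and ``basepython`` above, the spell checker should run like any other lint stage. A usage sketch, assuming :pypi:`tox` and Python 3.7 are installed locally:

.. sourcecode:: console

    $ pip install tox
    $ tox -e spell

The environment sets ``SPELLCHECK=1``, presumably so the Sphinx configuration only enables the :pypi:`sphinxcontrib-spelling` builder when a spell check is explicitly requested.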