author     Rick van de Loo <rickvandeloo@gmail.com>  2015-03-28 20:13:21 +1100
committer  Rick van de Loo <rickvandeloo@gmail.com>  2015-03-29 03:11:08 +1100
commit     5544d71bc897906253cf98771129f2997f4e19ae (patch)
tree       a51ce47651cd1fe8359b57586382dc5e46d01215 /taskflow
parent     179854e3eee11306cb888ef669cd3728b5e5bbb5 (diff)
download   taskflow-5544d71bc897906253cf98771129f2997f4e19ae.tar.gz
Fix a couple of spelling and grammar errors

Some things that popped out while reading the comments/documentation.

Change-Id: I0ccecae3381447ede44bb855d91f997349be1562
Diffstat (limited to 'taskflow')
-rw-r--r--  taskflow/examples/alphabet_soup.py                     2
-rw-r--r--  taskflow/examples/build_a_car.py                       2
-rw-r--r--  taskflow/examples/calculate_in_parallel.py             2
-rw-r--r--  taskflow/examples/create_parallel_volume.py            2
-rw-r--r--  taskflow/examples/echo_listener.py                     4
-rw-r--r--  taskflow/examples/fake_billing.py                      4
-rw-r--r--  taskflow/examples/hello_world.py                       2
-rw-r--r--  taskflow/examples/jobboard_produce_consume_colors.py   4
-rw-r--r--  taskflow/examples/persistence_example.py               10
-rw-r--r--  taskflow/examples/resume_vm_boot.py                    2
-rw-r--r--  taskflow/examples/resume_volume_create.py              2
-rw-r--r--  taskflow/examples/retry_flow.py                        2
-rw-r--r--  taskflow/examples/run_by_iter.py                       2
-rw-r--r--  taskflow/examples/run_by_iter_enumerate.py             4
-rw-r--r--  taskflow/examples/simple_linear.py                     4
-rw-r--r--  taskflow/examples/simple_linear_listening.py           4
-rw-r--r--  taskflow/examples/simple_linear_pass.py                2
-rw-r--r--  taskflow/examples/simple_map_reduce.py                 4
-rw-r--r--  taskflow/examples/timing_listener.py                   2
-rw-r--r--  taskflow/examples/wbe_event_sender.py                  8
-rw-r--r--  taskflow/examples/wbe_mandelbrot.py                    4
21 files changed, 36 insertions, 36 deletions
diff --git a/taskflow/examples/alphabet_soup.py b/taskflow/examples/alphabet_soup.py
index a287f53..eb199f8 100644
--- a/taskflow/examples/alphabet_soup.py
+++ b/taskflow/examples/alphabet_soup.py
@@ -38,7 +38,7 @@ from taskflow import task
# In this example we show how a simple linear set of tasks can be executed
-# using local processes (and not threads or remote workers) with minimial (if
+# using local processes (and not threads or remote workers) with minimal (if
# any) modification to those tasks to make them safe to run in this mode.
#
# This is useful since it allows further scaling up your workflows when thread
diff --git a/taskflow/examples/build_a_car.py b/taskflow/examples/build_a_car.py
index 02be020..a329181 100644
--- a/taskflow/examples/build_a_car.py
+++ b/taskflow/examples/build_a_car.py
@@ -38,7 +38,7 @@ ANY = notifier.Notifier.ANY
import example_utils as eu # noqa
-# INTRO: This examples shows how a graph flow and linear flow can be used
+# INTRO: This example shows how a graph flow and linear flow can be used
# together to execute dependent & non-dependent tasks by going through the
# steps required to build a simplistic car (an assembly line if you will). It
# also shows how raw functions can be wrapped into a task object instead of
diff --git a/taskflow/examples/calculate_in_parallel.py b/taskflow/examples/calculate_in_parallel.py
index 7ab32fa..0d800a6 100644
--- a/taskflow/examples/calculate_in_parallel.py
+++ b/taskflow/examples/calculate_in_parallel.py
@@ -30,7 +30,7 @@ from taskflow.patterns import linear_flow as lf
from taskflow.patterns import unordered_flow as uf
from taskflow import task
-# INTRO: This examples shows how a linear flow and a unordered flow can be
+# INTRO: These examples show how a linear flow and an unordered flow can be
# used together to execute calculations in parallel and then use the
# result for the next task/s. The adder task is used for all calculations
# and argument bindings are used to set correct parameters for each task.
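For reference, a minimal sketch of the flow structure this comment describes; the Provider/Adder tasks and the sample values are illustrative, not part of this patch:

    from taskflow import engines
    from taskflow import task
    from taskflow.patterns import linear_flow as lf
    from taskflow.patterns import unordered_flow as uf


    class Provider(task.Task):
        # Stores some values at construction time and returns them on execute.
        def __init__(self, name, *args, **kwargs):
            super(Provider, self).__init__(name=name, **kwargs)
            self._args = args

        def execute(self):
            return self._args


    class Adder(task.Task):
        # Adds two numbers; 'rebind' maps previously stored names onto x and y.
        def execute(self, x, y):
            return x + y


    flow = lf.Flow('root').add(
        Provider('provide', 2, 3, 5, provides=('a', 'b', 'c')),
        # The two adders do not depend on each other, so an unordered
        # (parallelizable) sub-flow groups them.
        uf.Flow('adders').add(
            Adder('add-ab', provides='ab', rebind=['a', 'b']),
            Adder('add-bc', provides='bc', rebind=['b', 'c']),
        ),
        Adder('add-total', provides='total', rebind=['ab', 'bc']),
    )

    results = engines.run(flow)
    print(results['total'])  # (2 + 3) + (3 + 5) == 13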
diff --git a/taskflow/examples/create_parallel_volume.py b/taskflow/examples/create_parallel_volume.py
index c23bf34..9bab0ba 100644
--- a/taskflow/examples/create_parallel_volume.py
+++ b/taskflow/examples/create_parallel_volume.py
@@ -35,7 +35,7 @@ from taskflow.listeners import printing
from taskflow.patterns import unordered_flow as uf
from taskflow import task
-# INTRO: This examples shows how unordered_flow can be used to create a large
+# INTRO: These examples show how unordered_flow can be used to create a large
# number of fake volumes in parallel (or serially, depending on a constant that
# can be easily changed).
diff --git a/taskflow/examples/echo_listener.py b/taskflow/examples/echo_listener.py
index a8eebf6..99871a8 100644
--- a/taskflow/examples/echo_listener.py
+++ b/taskflow/examples/echo_listener.py
@@ -31,8 +31,8 @@ from taskflow.patterns import linear_flow as lf
from taskflow import task
# INTRO: This example walks through a miniature workflow which will do a
-# simple echo operation; during this execution a listener is assocated with
-# the engine to recieve all notifications about what the flow has performed,
+# simple echo operation; during this execution a listener is associated with
+# the engine to receive all notifications about what the flow has performed,
# this example dumps that output to the stdout for viewing (at debug level
# to show all the information which is possible).
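A minimal sketch of attaching a listener so it receives an engine's notifications, using the printing listener imported by another example in this patch; the echo_listener example itself may wire up a different listener class:

    from taskflow import engines
    from taskflow.listeners import printing
    from taskflow.patterns import linear_flow as lf
    from taskflow import task


    class EchoTask(task.Task):
        def execute(self):
            print(self.name)


    flow = lf.Flow('echoing').add(EchoTask('hello'), EchoTask('world'))
    engine = engines.load(flow)

    # While the 'with' block is active the listener is registered on the
    # engine's notifiers, so flow and task state transitions get printed.
    with printing.PrintingListener(engine):
        engine.run()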
diff --git a/taskflow/examples/fake_billing.py b/taskflow/examples/fake_billing.py
index 5d26a2d..33804b5 100644
--- a/taskflow/examples/fake_billing.py
+++ b/taskflow/examples/fake_billing.py
@@ -36,8 +36,8 @@ from taskflow.patterns import linear_flow as lf
from taskflow import task
from taskflow.utils import misc
-# INTRO: This example walks through a miniature workflow which simulates a
-# the reception of a API request, creation of a database entry, driver
+# INTRO: This example walks through a miniature workflow which simulates
+# the reception of an API request, creation of a database entry, driver
# activation (which invokes a 'fake' webservice) and final completion.
#
# This example also shows how a function/object (in this class the url sending)
diff --git a/taskflow/examples/hello_world.py b/taskflow/examples/hello_world.py
index f8e0bb2..caba527 100644
--- a/taskflow/examples/hello_world.py
+++ b/taskflow/examples/hello_world.py
@@ -34,7 +34,7 @@ from taskflow.utils import eventlet_utils
# INTRO: This is the defacto hello world equivalent for taskflow; it shows how
-# a overly simplistic workflow can be created that runs using different
+# an overly simplistic workflow can be created that runs using different
# engines using different styles of execution (all can be used to run in
# parallel if a workflow is provided that is parallelizable).
diff --git a/taskflow/examples/jobboard_produce_consume_colors.py b/taskflow/examples/jobboard_produce_consume_colors.py
index 80c2acb..6f58603 100644
--- a/taskflow/examples/jobboard_produce_consume_colors.py
+++ b/taskflow/examples/jobboard_produce_consume_colors.py
@@ -40,7 +40,7 @@ from taskflow.utils import threading_utils
# In this example we show how a jobboard can be used to post work for other
# entities to work on. This example creates a set of jobs using one producer
# thread (typically this would be split across many machines) and then having
-# other worker threads with there own jobboards select work using a given
+# other worker threads with their own jobboards select work using a given
# filters [red/blue] and then perform that work (and consuming or abandoning
# the job after it has been completed or failed).
@@ -66,7 +66,7 @@ PRODUCER_UNITS = 10
# How many units of work are expected to be produced (used so workers can
# know when to stop running and shutdown, typically this would not be a
-# a value but we have to limit this examples execution time to be less than
+# a value but we have to limit this example's execution time to be less than
# infinity).
EXPECTED_UNITS = PRODUCER_UNITS * PRODUCERS
diff --git a/taskflow/examples/persistence_example.py b/taskflow/examples/persistence_example.py
index fe5968f..c911c2f 100644
--- a/taskflow/examples/persistence_example.py
+++ b/taskflow/examples/persistence_example.py
@@ -68,15 +68,15 @@ class ByeTask(task.Task):
print("Bye!")
-# This generates your flow structure (at this stage nothing is ran).
+# This generates your flow structure (at this stage nothing is run).
def make_flow(blowup=False):
flow = lf.Flow("hello-world")
flow.add(HiTask(), ByeTask(blowup))
return flow
-# Persist the flow and task state here, if the file/dir exists already blowup
-# if not don't blowup, this allows a user to see both the modes and to see
+# Persist the flow and task state here, if the file/dir exists already blow up
+# if not don't blow up, this allows a user to see both the modes and to see
# what is stored in each case.
if eu.SQLALCHEMY_AVAILABLE:
persist_path = os.path.join(tempfile.gettempdir(), "persisting.db")
@@ -91,8 +91,8 @@ else:
blowup = True
with eu.get_backend(backend_uri) as backend:
- # Make a flow that will blowup if the file doesn't exist previously, if it
- # did exist, assume we won't blowup (and therefore this shows the undo
+ # Make a flow that will blow up if the file didn't exist previously, if it
+ # did exist, assume we won't blow up (and therefore this shows the undo
# and redo that a flow will go through).
book = logbook.LogBook("my-test")
flow = make_flow(blowup=blowup)
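A hedged sketch of the persistence wiring this example exercises; the sqlite URI and the helper calls are assumptions based on taskflow's persistence utilities, not part of this patch:

    import contextlib

    from taskflow import engines
    from taskflow.persistence import backends
    from taskflow.persistence import logbook
    from taskflow.utils import persistence_utils as p_utils

    backend = backends.fetch({'connection': 'sqlite:////tmp/persisting.db'})
    with contextlib.closing(backend.get_connection()) as conn:
        conn.upgrade()  # create (or upgrade) the backend's schema

    # Record the flow's progress in a logbook/flow-detail pair so a later
    # run can resume instead of starting over.
    book = logbook.LogBook('my-test')
    flow = make_flow(blowup=False)  # make_flow() as defined in this example
    flow_detail = p_utils.create_flow_detail(flow, book, backend)

    engine = engines.load(flow, flow_detail=flow_detail,
                          book=book, backend=backend)
    engine.run()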
diff --git a/taskflow/examples/resume_vm_boot.py b/taskflow/examples/resume_vm_boot.py
index 4e93f78..8c7d4ae 100644
--- a/taskflow/examples/resume_vm_boot.py
+++ b/taskflow/examples/resume_vm_boot.py
@@ -44,7 +44,7 @@ from taskflow.utils import persistence_utils as p_utils
import example_utils as eu # noqa
-# INTRO: This examples shows how a hierarchy of flows can be used to create a
+# INTRO: These examples show how a hierarchy of flows can be used to create a
# vm in a reliable & resumable manner using taskflow + a miniature version of
# what nova does while booting a vm.
diff --git a/taskflow/examples/resume_volume_create.py b/taskflow/examples/resume_volume_create.py
index 275fa6b..93025d9 100644
--- a/taskflow/examples/resume_volume_create.py
+++ b/taskflow/examples/resume_volume_create.py
@@ -39,7 +39,7 @@ from taskflow.utils import persistence_utils as p_utils
import example_utils # noqa
-# INTRO: This examples shows how a hierarchy of flows can be used to create a
+# INTRO: These examples show how a hierarchy of flows can be used to create a
# pseudo-volume in a reliable & resumable manner using taskflow + a miniature
# version of what cinder does while creating a volume (very miniature).
diff --git a/taskflow/examples/retry_flow.py b/taskflow/examples/retry_flow.py
index c1cb1a2..3123aca 100644
--- a/taskflow/examples/retry_flow.py
+++ b/taskflow/examples/retry_flow.py
@@ -32,7 +32,7 @@ from taskflow import task
# INTRO: In this example we create a retry controller that receives a phone
# directory and tries different phone numbers. The next task tries to call Jim
-# using the given number. If if is not a Jim's number, the tasks raises an
+# using the given number. If it is not a Jim's number, the task raises an
# exception and retry controller takes the next number from the phone
# directory and retries the call.
#
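A minimal sketch of the retry pattern this comment describes, assuming taskflow's ForEach retry controller; the phone numbers and task body are illustrative, not part of this patch:

    from taskflow import engines
    from taskflow import retry
    from taskflow import task
    from taskflow.patterns import linear_flow as lf


    class CallJim(task.Task):
        def execute(self, phone_number):
            if phone_number != '555-1234':
                raise IOError("%s is not Jim's number" % phone_number)
            print("Hello Jim at %s!" % phone_number)


    # ForEach feeds one candidate value per attempt into the flow under the
    # 'phone_number' name; when CallJim raises, the controller retries with
    # the next value (and fails the flow once the values run out).
    flow = lf.Flow('call-jim',
                   retry=retry.ForEach(values=['555-1111', '555-1234'],
                                       provides='phone_number'))
    flow.add(CallJim())

    engines.run(flow)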
diff --git a/taskflow/examples/run_by_iter.py b/taskflow/examples/run_by_iter.py
index 4b7b98c..3a00a10 100644
--- a/taskflow/examples/run_by_iter.py
+++ b/taskflow/examples/run_by_iter.py
@@ -37,7 +37,7 @@ from taskflow import task
from taskflow.utils import persistence_utils
-# INTRO: This examples shows how to run a set of engines at the same time, each
+# INTRO: This example shows how to run a set of engines at the same time, each
# running in different engines using a single thread of control to iterate over
# each engine (which causes that engine to advanced to its next state during
# each iteration).
diff --git a/taskflow/examples/run_by_iter_enumerate.py b/taskflow/examples/run_by_iter_enumerate.py
index d954d6a..07334cc 100644
--- a/taskflow/examples/run_by_iter_enumerate.py
+++ b/taskflow/examples/run_by_iter_enumerate.py
@@ -33,10 +33,10 @@ from taskflow.persistence import backends as persistence_backends
from taskflow import task
from taskflow.utils import persistence_utils
-# INTRO: This examples shows how to run a engine using the engine iteration
+# INTRO: These examples show how to run an engine using the engine iteration
# capability, in between iterations other activities occur (in this case a
# value is output to stdout); but more complicated actions can occur at the
-# boundary when a engine yields its current state back to the caller.
+# boundary when an engine yields its current state back to the caller.
class EchoNameTask(task.Task):
diff --git a/taskflow/examples/simple_linear.py b/taskflow/examples/simple_linear.py
index 495b963..daa53dd 100644
--- a/taskflow/examples/simple_linear.py
+++ b/taskflow/examples/simple_linear.py
@@ -41,8 +41,8 @@ from taskflow import task
# taskflow provides via tasks and flows makes it possible for you to easily at
# a later time hook in a persistence layer (and then gain the functionality
# that offers) when you decide the complexity of adding that layer in
-# is 'worth it' for your applications usage pattern (which certain applications
-# may not need).
+# is 'worth it' for your application's usage pattern (which certain
+# applications may not need).
class CallJim(task.Task):
diff --git a/taskflow/examples/simple_linear_listening.py b/taskflow/examples/simple_linear_listening.py
index d14c82c..deff63c 100644
--- a/taskflow/examples/simple_linear_listening.py
+++ b/taskflow/examples/simple_linear_listening.py
@@ -37,7 +37,7 @@ ANY = notifier.Notifier.ANY
# a given ~phone~ number (provided as a function input) in a linear fashion
# (one after the other).
#
-# For a workflow which is serial this shows a extremely simple way
+# For a workflow which is serial this shows an extremely simple way
# of structuring your tasks (the code that does the work) into a linear
# sequence (the flow) and then passing the work off to an engine, with some
# initial data to be ran in a reliable manner.
@@ -92,7 +92,7 @@ engine = taskflow.engines.load(flow, store={
})
# This is where we attach our callback functions to the 2 different
-# notification objects that a engine exposes. The usage of a '*' (kleene star)
+# notification objects that an engine exposes. The usage of a '*' (kleene star)
# here means that we want to be notified on all state changes, if you want to
# restrict to a specific state change, just register that instead.
engine.notifier.register(ANY, flow_watch)
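A minimal sketch of registering callbacks on the two notification objects mentioned here; engine.notifier appears in this patch, while atom_notifier (task-level notifications) is an assumption about the engine's other notifier:

    import taskflow.engines
    from taskflow.patterns import linear_flow as lf
    from taskflow import task
    from taskflow.types import notifier

    ANY = notifier.Notifier.ANY


    class CallJim(task.Task):
        def execute(self, jim_number):
            print("Calling jim %s." % jim_number)


    def flow_watch(state, details):
        print('Flow => %s' % state)


    def task_watch(state, details):
        print('Task => %s %s' % (state, details))


    flow = lf.Flow('simple-linear').add(CallJim())
    engine = taskflow.engines.load(flow, store={'jim_number': 555})

    # ANY acts as a wildcard; registering on a specific state name instead
    # restricts the callback to that single transition.
    engine.notifier.register(ANY, flow_watch)
    engine.atom_notifier.register(ANY, task_watch)

    engine.run()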
diff --git a/taskflow/examples/simple_linear_pass.py b/taskflow/examples/simple_linear_pass.py
index d378418..683f251 100644
--- a/taskflow/examples/simple_linear_pass.py
+++ b/taskflow/examples/simple_linear_pass.py
@@ -31,7 +31,7 @@ from taskflow import engines
from taskflow.patterns import linear_flow
from taskflow import task
-# INTRO: This examples shows how a task (in a linear/serial workflow) can
+# INTRO: This example shows how a task (in a linear/serial workflow) can
# produce an output that can be then consumed/used by a downstream task.
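A minimal sketch of one task's output being consumed by a downstream task; the task names are illustrative:

    from taskflow import engines
    from taskflow import task
    from taskflow.patterns import linear_flow


    class TaskA(task.Task):
        # Whatever execute() returns is stored under the 'a' name...
        default_provides = 'a'

        def execute(self):
            return 'value-from-a'


    class TaskB(task.Task):
        # ...and handed to any later task whose execute() asks for 'a'.
        def execute(self, a):
            print("Got input '%s'" % a)


    flow = linear_flow.Flow('pass-from-to').add(TaskA('a'), TaskB('b'))
    engines.run(flow)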
diff --git a/taskflow/examples/simple_map_reduce.py b/taskflow/examples/simple_map_reduce.py
index 3a47fdc..6476b48 100644
--- a/taskflow/examples/simple_map_reduce.py
+++ b/taskflow/examples/simple_map_reduce.py
@@ -27,9 +27,9 @@ top_dir = os.path.abspath(os.path.join(os.path.dirname(__file__),
sys.path.insert(0, top_dir)
sys.path.insert(0, self_dir)
-# INTRO: this examples shows a simplistic map/reduce implementation where
+# INTRO: These examples show a simplistic map/reduce implementation where
# a set of mapper(s) will sum a series of input numbers (in parallel) and
-# return there individual summed result. A reducer will then use those
+# return their individual summed result. A reducer will then use those
# produced values and perform a final summation and this result will then be
# printed (and verified to ensure the calculation was as expected).
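A hedged sketch of the map/reduce shape described here; the chunk values and the inject/requires wiring are assumptions, not part of this patch:

    from taskflow import engines
    from taskflow import task
    from taskflow.patterns import linear_flow as lf
    from taskflow.patterns import unordered_flow as uf


    class SumMapper(task.Task):
        # Each mapper sums its own chunk of numbers.
        def execute(self, inputs):
            return sum(inputs)


    class TotalReducer(task.Task):
        # The reducer receives every mapper's named result as a keyword
        # argument and folds them into one grand total.
        def execute(self, *args, **kwargs):
            return sum(kwargs.values())


    chunks = [[1, 2, 3], [4, 5], [6, 7, 8, 9]]
    mappers = uf.Flow('mappers')
    provided = []
    for i, chunk in enumerate(chunks):
        name = 'reduction_%s' % i
        provided.append(name)
        mappers.add(SumMapper(name='mapper_%s' % i, provides=name,
                              inject={'inputs': chunk}))

    flow = lf.Flow('map-reduce').add(
        mappers,
        TotalReducer('reducer', requires=provided, provides='total'))

    print(engines.run(flow)['total'])  # expected: sum(1..9) == 45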
diff --git a/taskflow/examples/timing_listener.py b/taskflow/examples/timing_listener.py
index 68d350f..4e73154 100644
--- a/taskflow/examples/timing_listener.py
+++ b/taskflow/examples/timing_listener.py
@@ -36,7 +36,7 @@ from taskflow import task
# and have variable run time tasks run and show how the listener will print
# out how long those tasks took (when they started and when they finished).
#
-# This shows how timing metrics can be gathered (or attached onto a engine)
+# This shows how timing metrics can be gathered (or attached onto an engine)
# after a workflow has been constructed, making it easy to gather metrics
# dynamically for situations where this kind of information is applicable (or
# even adding this information on at a later point in the future when your
diff --git a/taskflow/examples/wbe_event_sender.py b/taskflow/examples/wbe_event_sender.py
index e5b075a..9f9dbd8 100644
--- a/taskflow/examples/wbe_event_sender.py
+++ b/taskflow/examples/wbe_event_sender.py
@@ -36,10 +36,10 @@ from taskflow.utils import threading_utils
ANY = notifier.Notifier.ANY
-# INTRO: This examples shows how to use a remote workers event notification
+# INTRO: These examples show how to use a remote worker's event notification
# attribute to proxy back task event notifications to the controlling process.
#
-# In this case a simple set of events are triggered by a worker running a
+# In this case a simple set of events is triggered by a worker running a
# task (simulated to be remote by using a kombu memory transport and threads).
# Those events that the 'remote worker' produces will then be proxied back to
# the task that the engine is running 'remotely', and then they will be emitted
@@ -113,10 +113,10 @@ if __name__ == "__main__":
workers = []
# These topics will be used to request worker information on; those
- # workers will respond with there capabilities which the executing engine
+ # workers will respond with their capabilities which the executing engine
# will use to match pending tasks to a matched worker, this will cause
# the task to be sent for execution, and the engine will wait until it
- # is finished (a response is recieved) and then the engine will either
+ # is finished (a response is received) and then the engine will either
# continue with other tasks, do some retry/failure resolution logic or
# stop (and potentially re-raise the remote workers failure)...
worker_topics = []
diff --git a/taskflow/examples/wbe_mandelbrot.py b/taskflow/examples/wbe_mandelbrot.py
index c59b85c..48db5e6 100644
--- a/taskflow/examples/wbe_mandelbrot.py
+++ b/taskflow/examples/wbe_mandelbrot.py
@@ -111,11 +111,11 @@ def calculate(engine_conf):
# an image bitmap file.
# And unordered flow is used here since the mandelbrot calculation is an
- # example of a embarrassingly parallel computation that we can scatter
+ # example of an embarrassingly parallel computation that we can scatter
# across as many workers as possible.
flow = uf.Flow("mandelbrot")
- # These symbols will be automatically given to tasks as input to there
+ # These symbols will be automatically given to tasks as input to their
# execute method, in this case these are constants used in the mandelbrot
# calculation.
store = {