I am trying to use IPython.parallel map. The inputs to the function I wish to parallelize are generators. Because of size/memory it is not possible for me to convert the generators to lists. See code below:
# Set up an IPython.parallel client (requires a running cluster,
# e.g. started with `ipcluster start`).
from itertools import product
from IPython.parallel import Client
c = Client()
# DirectView over all engines in the cluster
v = c[:]
# list of engine ids - shows how many workers are available
c.ids
def stringcount(longstring, substrings):
    """Count occurrences of each substring in *longstring*.

    Returns a list with one count per entry of *substrings*, in order.
    """
    counts = []
    for needle in substrings:
        counts.append(longstring.count(needle))
    return counts
# Both inputs are generators - too large to materialize as lists.
substrings = product('abc', repeat=2)
longstring = product('abc', repeat=3)
# This is what I want to do in parallel
# It should be 'for longs in longstring'; I use range() because it can get long.
for num in range(10):
    # NOTE: generator .next() is Python 2 only; Python 3 spells this next(gen)
    longs = longstring.next()
    subs = substrings.next()
    print(subs, longs)
    count = stringcount(longs, subs)
    print(count)
# This does not work, and I understand why.
# I don't know how to fix it while keeping longstring and substrings as
# generators
v.map(stringcount, longstring, substrings)
# NOTE(review): iterating the view itself looks wrong - presumably this should
# iterate the AsyncResult returned by v.map; confirm against the map API.
for r in v:
    print(r.get())
You can't use `View.map` with a generator without walking through the entire generator first. But you can write your own function that submits batches of tasks from a generator and waits for them incrementally. I don't have a more interesting example, but I can illustrate with a terrible implementation of a prime search.
Start with our token 'data generator':
from math import sqrt
def generate_possible_factors(N):
    """generator for iterating through possible factors for N

    yields 2, then every odd integer 3, 5, ... up to int(sqrt(N))
    """
    # numbers at or below 3 get no candidates at all
    if N <= 3:
        return
    yield 2
    limit = int(sqrt(N))
    for candidate in range(3, limit + 1, 2):
        yield candidate
This just generates a sequence of integers to use when testing if a number is prime.
Now, the trivial function that we will use as a task with IPython.parallel:
def is_factor(f, N):
    """Return True exactly when f divides N with no remainder."""
    remainder = N % f
    return remainder == 0
and a complete implementation of prime check using the generator and our factor function:
def dumb_prime(N):
    """dumb implementation of is N prime?

    Returns True if N is prime, False otherwise (including N < 2).
    """
    # BUGFIX: 0 and 1 are not prime, but the generator yields nothing for
    # N <= 3, so the original fell through to `return True` for them.
    if N < 2:
        return False
    for f in generate_possible_factors(N):
        if is_factor(f, N):
            return False
    return True
A parallel version that only submits a limited number of tasks at a time:
def parallel_dumb_prime(N, v, max_outstanding=10, dt=0.1):
    """dumb_prime where each factor is checked remotely

    Up to `max_outstanding` factors will be checked in parallel.
    Submission will halt as soon as we know that N is not prime.

    Parameters
    ----------
    N : int, number to test for primality
    v : IPython.parallel view used to submit `is_factor` tasks
    max_outstanding : int, maximum number of tasks in flight at once
    dt : float, polling timeout (seconds) while waiting for results
    """
    tasks = set()
    # factors is a generator - consumed lazily so we stop pulling candidates
    # as soon as a factor is found
    factors = generate_possible_factors(N)
    while True:
        try:
            # top up the in-flight set to `max_outstanding` tasks;
            # next() builtin works on Python 2.6+ and 3.x, unlike the
            # Python-2-only generator .next() method
            for _ in range(max_outstanding - len(tasks)):
                f = next(factors)
                tasks.add(v.apply_async(is_factor, f, N))
        except StopIteration:
            # no more factors to test, stop submitting
            break
        # collect finished tasks, waiting a little if none are done yet
        ready = set(task for task in tasks if task.ready())
        while not ready:
            v.wait(tasks, timeout=dt)
            ready = set(task for task in tasks if task.ready())
        for t in ready:
            # get the result - if True, N is not prime, we are done
            if t.get():
                return False
        # keep only the still-pending tasks, then submit the next batch
        tasks.difference_update(ready)
    # check the last few outstanding tasks
    for task in tasks:
        # BUGFIX: the original tested `t.get()` - the stale loop variable
        # from the ready-set loop above - instead of `task.get()`, so the
        # final outstanding results were never actually inspected
        if task.get():
            return False
    # checked all candidates, none are factors, so N is prime
    return True
This submits a limited number of tasks at a time, and as soon as we know that N is not prime, we stop consuming the generator.
To use this function:
from IPython import parallel
# connect to a running cluster and distribute tasks via load balancing
rc = parallel.Client()
view = rc.load_balanced_view()
# print every prime in [900, 1000), checking at most 10 factors in parallel
for N in range(900,1000):
    if parallel_dumb_prime(N, view, 10):
        # Python 2 print statement (this answer predates Python 3 syntax)
        print N
A more complete illustration in a notebook.
I took a slightly different approach to your problem that may be useful to others. Below, I attempted to mimic the behavior of the `multiprocessing.pool.Pool.imap` method by wrapping `IPython.parallel.map`. This required me to re-write your functions slightly.
import IPython
from itertools import product
def stringcount(pair):
    """Count each substring's occurrences in longstring.

    Takes a single (longstring, substrings) pair so it can be used with
    map-style APIs that pass exactly one argument per task. Returns
    (longstring, substrings, counts).

    BUGFIX: the original signature used Python 2 tuple-parameter unpacking,
    `def stringcount((longstring, substrings))`, which was removed in
    Python 3 (PEP 3113); unpack inside the body instead. Callers still pass
    one tuple, so the interface is unchanged.
    """
    longstring, substrings = pair
    scount = [longstring.count(s) for s in substrings]
    return (longstring, substrings, scount)
def gen_pairs(long_string, sub_strings):
    """Yield (item, next sub_strings item) pairs, one per element of long_string.

    BUGFIX: uses the next() builtin (Python 2.6+ and 3.x, and works on any
    iterator) instead of the Python-2-only generator .next() method.
    NOTE(review): if sub_strings runs out first, next() raises StopIteration
    inside this generator, which Python 3.7+ turns into RuntimeError
    (PEP 479) - presumably both inputs have equal length; verify at call site.
    """
    for l in long_string:
        s = next(sub_strings)
        yield (l, s)
def imap(function, generator, view, preprocessor=iter, chunksize=256):
    """Lazily map `function` over `generator` with an IPython.parallel view.

    Mimics multiprocessing.pool.Pool.imap: items are buffered into batches of
    `chunksize * number_of_engines`, each batch is dispatched with view.map,
    and results are yielded in input order as each batch completes. The
    generator is never fully materialized.

    Parameters
    ----------
    function : callable applied to each item
    generator : iterable of inputs (may be unbounded)
    view : IPython.parallel view; must expose .map and .client.ids
    preprocessor : callable turning `generator` into an iterator (default iter)
    chunksize : items per engine in each batch

    BUGFIX: the original flushed on `not i % (chunksize * num_cores)`, which
    is true at i == 0, so the very first "batch" held a single item and was
    mapped alone. Flushing on queue length gives uniformly sized batches
    while yielding results in the same order.
    """
    batch_size = chunksize * len(view.client.ids)
    queue = []
    for item in preprocessor(generator):
        queue.append(item)
        if len(queue) >= batch_size:
            for result in view.map(function, queue):
                yield result
            queue = []
    # flush any remaining partial batch (skip the empty map the original did)
    if queue:
        for result in view.map(function, queue):
            yield result
# Wire it together: a load-balanced view over a running cluster.
client = IPython.parallel.Client()
lbview = client.load_balanced_view()
# Both inputs stay lazy generators; gen_pairs zips them into the
# single-argument tuples that stringcount expects.
longstring = product('abc', repeat=3)
substrings = product('abc', repeat=2)
for result in imap(stringcount, gen_pairs(longstring, substrings), lbview):
    # Python 2 print statement (this answer predates Python 3 syntax)
    print result
The output I'm seeing is on this Notebook: http://nbviewer.ipython.org/gist/driscoll/b8de4bf980de1ad890de