Can someone please explain why:
import networkx as nx
import time
import multiprocessing as mp

def work(item):
    pths = []
    for path in nx.all_simple_paths(G, item, endNode, cutoff=maxLen - 1):
        path.insert(0, startNode)
        pths.append(path)
    return pths

def init_worker(Gr, st, ed, cl):
    global G, startNode, endNode, maxLen
    print "process initializing", mp.current_process()
    G, startNode, endNode, maxLen = Gr, st, ed, cl
    G.remove_node(startNode)

def multi_core(graph, sourceChem, sinkChem, maxLen, minLen):
    paths = []
    startNeighbours = graph[sourceChem].keys()
    p = mp.Pool(initializer=init_worker, initargs=(graph, sourceChem, sinkChem, 9))
    paths = p.map(work, startNeighbours, chunksize=(len(startNeighbours) / mp.cpu_count()))
    for i in paths:
        if len(i) != 0:
            for j in i:
                paths.append(j)
    p.close()
    p.join()
    return paths

if __name__ == "__main__":
    print multi_core(nx.read_graphml("RESTKegg.graphml"), 'C00025', 'C00029', 4, 2)
runs forever at 100% usage across all cores? I've used code similar to this before, but without a return in multi_core; is that return what is causing the problem?
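As a sanity check on the pattern itself, a stripped-down version of the same shape (square and run_pool are just placeholder names, nothing from the real program), where the pool builds a list with map and the wrapping function returns it, completes normally when run on its own:

import multiprocessing as mp

def square(x):
    # Trivial stand-in for work(): build and return a small list per item
    return [x, x * x]

def run_pool(values):
    # Same shape as multi_core: map over the pool, then return the collected result
    pool = mp.Pool()
    results = pool.map(square, values)
    pool.close()
    pool.join()
    return results

if __name__ == "__main__":
    print(run_pool(range(8)))

So I don't think the return by itself should be able to hang anything, but I may be missing something.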
Edit: running the following as an unthreaded version returns fine:

graph = nx.read_graphml("minimal.graphml")
startNeighbours = graph['C00030'].keys()
init_worker(graph, 'C00030', 'C00072', 4)
for i in startNeighbours:
    work(i)
Update: I built a test bed to isolate this code from the rest of the program. On its own the code works fine, so the problem appears to be in how it interacts with my Kivy front-end.
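For reference, this is a rough sketch of how I'd expect this kind of pool call to coexist with a Kivy app (PathApp, start_search and show_result are made-up names here; multi_core is the function above): run the search in a background thread so the Kivy event loop isn't blocked, and hand the result back with Clock.schedule_once.

import threading
import networkx as nx

from kivy.app import App
from kivy.uix.button import Button
from kivy.clock import Clock

# multi_core is assumed to be the function from the code above,
# defined in (or imported into) this module.

class PathApp(App):
    def build(self):
        btn = Button(text="Find paths")
        btn.bind(on_press=self.start_search)
        return btn

    def start_search(self, *args):
        # Keep the Pool work off the Kivy event loop.
        threading.Thread(target=self.run_search).start()

    def run_search(self):
        graph = nx.read_graphml("RESTKegg.graphml")
        result = multi_core(graph, 'C00025', 'C00029', 4, 2)
        # Hand the result back to the GUI thread.
        Clock.schedule_once(lambda dt: self.show_result(result))

    def show_result(self, result):
        print(result)

if __name__ == "__main__":
    PathApp().run()

Is there anything about this arrangement that would make the pool spin forever, or is the problem likely elsewhere in my Kivy code?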