import threading
import Queue
import urllib2
import time

urls = ["http://www.google.com",
        "http://www.naver.com",
        "http://www.daum.net"]

queue = Queue.Queue()

class CustomThread(threading.Thread):
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def run(self):
        while True:
            # Block until a URL is available, then time the fetch.
            task_url = self.queue.get()
            start = time.time()
            page = urllib2.urlopen(task_url)
            end = time.time()
            print "%s elapsed %s (from %s)" \
                % (task_url, end - start, self.getName())
            self.queue.task_done()

def main():
    # Start two daemon worker threads that consume from the queue.
    for i in range(2):
        t = CustomThread(queue)
        t.daemon = True
        t.start()

    # Produce the tasks, then wait until every task is marked done.
    for task_url in urls:
        queue.put(task_url)
    queue.join()
    print "Ended"

if __name__ == "__main__":
    main()
This is a typical producer-consumer model built on a queue. This model is useful when we want the worker threads to keep running even after all tasks have been consumed (like a thread pool). Otherwise, we could call join() on each thread instead of calling queue.join(), as sketched below.
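For reference, here is a minimal sketch of that second approach. The sentinel convention (putting None on the queue once per worker) is an assumption for illustration, not part of the original code: each worker exits its loop when it sees the sentinel, so the main thread can join() the workers directly rather than relying on daemon threads and queue.join().

import threading
import Queue
import urllib2

urls = ["http://www.google.com",
        "http://www.naver.com",
        "http://www.daum.net"]

queue = Queue.Queue()

def worker():
    while True:
        task_url = queue.get()
        if task_url is None:  # sentinel (assumed convention): no more work
            break
        urllib2.urlopen(task_url)

threads = [threading.Thread(target=worker) for _ in range(2)]
for t in threads:
    t.start()

for task_url in urls:
    queue.put(task_url)
for _ in threads:
    queue.put(None)   # one sentinel per worker so every loop terminates
for t in threads:
    t.join()          # join each thread instead of calling queue.join()
print "Ended"

Because the workers are joined explicitly, they no longer need to be daemon threads, and the program cannot exit while a fetch is still in flight.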