日韩性视频-久久久蜜桃-www中文字幕-在线中文字幕av-亚洲欧美一区二区三区四区-撸久久-香蕉视频一区-久久无码精品丰满人妻-国产高潮av-激情福利社-日韩av网址大全-国产精品久久999-日本五十路在线-性欧美在线-久久99精品波多结衣一区-男女午夜免费视频-黑人极品ⅴideos精品欧美棵-人人妻人人澡人人爽精品欧美一区-日韩一区在线看-欧美a级在线免费观看

歡迎訪問 生活随笔!

生活随笔

當前位置: 首頁 > 编程语言 > python >内容正文

python

python 抓取“煎蛋妹子图”并分页存在本地(普通版和多线程版)

發布時間:2025/3/14 python 49 豆豆
生活随笔 收集整理的這篇文章主要介紹了 python 抓取“煎蛋妹子图”并分页存在本地(普通版和多线程版) 小編覺得挺不錯的,現在分享給大家,幫大家做個參考.

  想測試一下python多線程爬蟲的效率,就寫了個爬蟲程序的多線程版和普通版。測試了一下。發現單線程比多線程還快。原因其實在代碼本身:多線程版在循環裡每啟動一個線程就立刻 join() 等它結束,下載仍然是逐個串行進行的,並沒有真正並發;應先把所有線程 start(),再統一 join()。另外 CPython 的 GIL 只對 CPU 密集任務有影響,下載這類 I/O 密集任務用多線程本應有收益。

  暫時存在這里,希望以后能解決。

  爬蟲單線程版:

  

# -*- coding: utf-8 -*-
"""Single-threaded scraper for jandan.net "ooxx" picture pages.

Downloads every image found on listing pages ``page_start``..``page_end``
into a per-page subdirectory of the current working directory.

Ported to Python 3: the ``reload(sys)`` / ``setdefaultencoding`` hack is
removed, and the third-party ``requests`` dependency is replaced by the
standard-library ``urllib.request``.
"""
import os
import re
import urllib.request
from time import ctime


class crawl_girls(object):
    """Fetch one listing page and download every image it references."""

    def __init__(self, url, pagenum):
        self.url = url          # listing-page URL to scrape
        self.pagenum = pagenum  # page number; used as the output directory name
        self.content = ""       # raw HTML of the listing page
        self.img_urls = []      # image URLs extracted from content
        self.img_names = []     # comment ids, used as local file names

    def getContent(self):
        """Fetch ``self.url`` and store its HTML in ``self.content``.

        Network errors are printed and swallowed (best effort), matching
        the original log-and-continue behaviour.
        """
        try:
            with urllib.request.urlopen(self.url) as resp:
                # Decode leniently: we only regex-scan the ASCII markup.
                self.content = resp.read().decode("utf-8", errors="replace")
        except OSError as e:  # URLError/HTTPError are OSError subclasses
            print(e)

    def getImgNames(self):
        """Extract the per-image comment ids (used as file names)."""
        self.img_names = re.findall(r'<li id="comment-(.+?)">', self.content)

    def getImgUrls(self):
        """Extract the image source URLs."""
        self.img_urls = re.findall(r'<p><img src="(.+?)"', self.content)

    def start_download(self):
        """Scrape the page, then download each image sequentially."""
        self.getContent()
        self.getImgNames()
        self.getImgUrls()

        # One subdirectory per page; tolerate re-runs instead of crashing
        # (the original os.mkdir raised if the directory already existed).
        file_dir = os.path.join(os.getcwd(), str(self.pagenum))
        os.makedirs(file_dir, exist_ok=True)

        for name, url in zip(self.img_names, self.img_urls):
            # Use the real extension; the original's url[-4:] broke on any
            # extension that is not exactly three characters (e.g. .jpeg).
            ext = os.path.splitext(url)[1] or ".jpg"
            file_path = os.path.join(file_dir, name + ext)
            print('starting at', ctime())
            urllib.request.urlretrieve(url, file_path)
            print('finished at', ctime())


def main(page_start, page_end):
    """Download listing pages ``page_start``..``page_end`` (inclusive)."""
    for pagenum in range(page_start, page_end + 1):
        # Build the URL explicitly; the original str.replace('1', ...)
        # would have corrupted any other '1' appearing in the template.
        url = 'http://jandan.net/ooxx/page-%d#comments' % pagenum
        print(url)
        girls = crawl_girls(url, pagenum)
        girls.start_download()

    print("all Done")


if __name__ == '__main__':
    main(905, 906)

  爬蟲多線程版:

  

# -*- coding: utf-8 -*-
"""Multithreaded scraper for jandan.net "ooxx" picture pages.

Same contract as the single-threaded version, but each image is fetched
by its own worker thread.

Ported to Python 3 (stdlib ``urllib.request`` instead of ``requests``),
and the threading bug fixed: the original ``start(); join()`` inside the
loop serialised every download, which is why it was no faster than the
plain version.
"""
import os
import re
import urllib.request
from threading import Thread
from time import ctime


class crawl_girls(object):
    """Fetch one listing page and download every image it references."""

    def __init__(self, url, pagenum):
        self.url = url          # listing-page URL to scrape
        self.pagenum = pagenum  # page number; used as the output directory name
        self.content = ""       # raw HTML of the listing page
        self.img_urls = []      # image URLs extracted from content
        self.img_names = []     # comment ids, used as local file names

    def getContent(self):
        """Fetch ``self.url`` and store its HTML in ``self.content``.

        Network errors are printed and swallowed (best effort), matching
        the original log-and-continue behaviour.
        """
        try:
            with urllib.request.urlopen(self.url) as resp:
                # Decode leniently: we only regex-scan the ASCII markup.
                self.content = resp.read().decode("utf-8", errors="replace")
        except OSError as e:  # URLError/HTTPError are OSError subclasses
            print(e)

    def getImgNames(self):
        """Extract the per-image comment ids (used as file names)."""
        self.img_names = re.findall(r'<li id="comment-(.+?)">', self.content)

    def getImgUrls(self):
        """Extract the image source URLs."""
        self.img_urls = re.findall(r'<p><img src="(.+?)"', self.content)

    def start_thread(self):
        """Scrape the page, then download all images concurrently.

        Bug fix: the original started each thread and immediately
        ``join()``ed it inside the loop, so only one download ever ran at
        a time.  Now all threads are started first, then joined.
        """
        self.getContent()
        self.getImgNames()
        self.getImgUrls()

        # One subdirectory per page; tolerate re-runs instead of crashing
        # (the original os.mkdir raised if the directory already existed).
        file_dir = os.path.join(os.getcwd(), str(self.pagenum))
        os.makedirs(file_dir, exist_ok=True)

        threads = []
        for name, url in zip(self.img_names, self.img_urls):
            # Use the real extension; the original's url[-4:] broke on any
            # extension that is not exactly three characters (e.g. .jpeg).
            ext = os.path.splitext(url)[1] or ".jpg"
            file_path = os.path.join(file_dir, name + ext)
            print('start download', file_path)
            threads.append(download_threads(url, file_path))

        print('starting at', ctime())
        for worker in threads:
            worker.start()
        for worker in threads:
            worker.join()
        print('finished at', ctime())


class download_threads(Thread):
    """Worker thread that downloads a single image to a local path."""

    def __init__(self, url, path):
        Thread.__init__(self)
        self.url = url    # remote image URL
        self.path = path  # local destination file path

    def run(self):
        urllib.request.urlretrieve(self.url, self.path)


def main(page_start, page_end):
    """Download listing pages ``page_start``..``page_end`` (inclusive)."""
    for pagenum in range(page_start, page_end + 1):
        # Build the URL explicitly; the original str.replace('1', ...)
        # would have corrupted any other '1' appearing in the template.
        url = 'http://jandan.net/ooxx/page-%d#comments' % pagenum
        print(url)
        girls = crawl_girls(url, pagenum)
        girls.start_thread()

    print("all Done")


if __name__ == '__main__':
    main(905, 906)

?

  

轉載于:https://www.cnblogs.com/lkprof/p/3267039.html

總結

以上是生活随笔為你收集整理的python 抓取“煎蛋妹子图”并分页存在本地(普通版和多线程版)的全部內容,希望文章能夠幫你解決所遇到的問題。

如果覺得生活随笔網站內容還不錯,歡迎將生活随笔推薦給好友。