
Python Code Example: Scraping Book Information from Dangdang, JD, and Amazon

Notes:

1. This program stores its results in a MySQL database (the code uses pymysql; the original note said MS SQL Server, which does not match the code). Edit the connection settings at the top of the script before running it.

2. Requires the bs4, requests, and pymysql libraries.

3. Scrapes the three sites in parallel threads.
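The script expects an empty database named book to already exist. A minimal sketch for creating it, assuming the same root/root credentials on 127.0.0.1 that the script below uses (adjust to your own setup):

import pymysql

# One-time setup: create the empty database the scraper connects to.
conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='root', charset='utf8')
with conn.cursor() as cursor:
    cursor.execute('CREATE DATABASE IF NOT EXISTS book CHARACTER SET utf8')
conn.close()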

from bs4 import BeautifulSoup
import re, requests, pymysql, threading, os, traceback

# Database connection; edit these settings to match your environment.
# NOTE: all three scraper threads share this single connection/cursor.
# pymysql connections are not thread-safe, so for heavier use each
# thread should open its own connection.
try:
    conn = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='root', db='book', charset='utf8')
    cursor = conn.cursor()
except:
    print('\nError: database connection failed')

# Return the HTML of the given page, or an empty string on failure
def getHTMLText(url):
    try:
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'}
        r = requests.get(url, headers=headers)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except:
        return ''

# Return a BeautifulSoup object for the given URL
def getSoupObject(url):
    try:
        html = getHTMLText(url)
        soup = BeautifulSoup(html, 'html.parser')
        return soup
    except:
        return ''

# Get the total number of result pages for the keyword on the given site
def getPageLength(webSiteName, url):
    try:
        soup = getSoupObject(url)
        if webSiteName == 'DangDang':
            a = soup('a', {'name': 'bottom-page-turn'})
            return a[-1].string
        elif webSiteName == 'Amazon':
            a = soup('span', {'class': 'pagnDisabled'})
            return a[-1].string
    except:
        print('\nError: failed to get the total page count for {}...'.format(webSiteName))
        return -1

class DangDangThread(threading.Thread):
    def __init__(self, keyword):
        threading.Thread.__init__(self)
        self.keyword = keyword

    def run(self):
        print('\nInfo: starting to scrape Dangdang...')
        count = 1
        # Total number of result pages
        length = getPageLength('DangDang', 'http://search.dangdang.com/?key={}'.format(self.keyword))
        tableName = 'db_{}_dangdang'.format(self.keyword)
        try:
            print('\nInfo: creating the Dangdang table...')
            cursor.execute('create table {} (id int, title text, prNow text, prPre text, link text)'.format(tableName))
            print('\nInfo: scraping Dangdang pages...')
            for i in range(1, int(length) + 1):  # +1 so the last page is not skipped
                url = 'http://search.dangdang.com/?key={}&page_index={}'.format(self.keyword, i)
                soup = getSoupObject(url)
                lis = soup('li', {'class': re.compile(r'line'), 'id': re.compile(r'p')})
                for li in lis:
                    # '单品标题' is the literal dd_name attribute value in Dangdang's markup
                    a = li.find_all('a', {'name': 'itemlist-title', 'dd_name': '单品标题'})
                    pn = li.find_all('span', {'class': 'search_now_price'})
                    pp = li.find_all('span', {'class': 'search_pre_price'})
                    if len(a) != 0:
                        link = a[0].attrs['href']
                        title = a[0].attrs['title'].strip()
                    else:
                        link = 'NULL'
                        title = 'NULL'
                    if len(pn) != 0:
                        prNow = pn[0].string
                    else:
                        prNow = 'NULL'
                    if len(pp) != 0:
                        prPre = pp[0].string
                    else:
                        prPre = 'NULL'
                    # Parameterized insert, so titles containing quotes cannot break the SQL
                    sql = 'insert into {} (id, title, prNow, prPre, link) values (%s, %s, %s, %s, %s)'.format(tableName)
                    cursor.execute(sql, (count, title, prNow, prPre, link))
                    print('\rInfo: storing Dangdang data, current id: {}'.format(count), end='')
                    count += 1
                conn.commit()  # commit after each results page
        except:
            traceback.print_exc()

class AmazonThread(threading.Thread):
    def __init__(self, keyword):
        threading.Thread.__init__(self)
        self.keyword = keyword

    def run(self):
        print('\nInfo: starting to scrape Amazon...')
        count = 1
        # Total number of result pages
        length = getPageLength('Amazon', 'https://www.amazon.cn/s/keywords={}'.format(self.keyword))
        tableName = 'db_{}_amazon'.format(self.keyword)
        try:
            print('\nInfo: creating the Amazon table...')
            cursor.execute('create table {} (id int, title text, prNow text, link text)'.format(tableName))
            print('\nInfo: scraping Amazon pages...')
            for i in range(1, int(length) + 1):  # +1 so the last page is not skipped
                url = 'https://www.amazon.cn/s/keywords={}&page={}'.format(self.keyword, i)
                soup = getSoupObject(url)
                lis = soup('li', {'id': re.compile(r'result_')})
                for li in lis:
                    a = li.find_all('a', {'class': 'a-link-normal s-access-detail-page a-text-normal'})
                    pn = li.find_all('span', {'class': 'a-size-base a-color-price s-price a-text-bold'})
                    if len(a) != 0:
                        link = a[0].attrs['href']
                        title = a[0].attrs['title'].strip()
                    else:
                        link = 'NULL'
                        title = 'NULL'
                    if len(pn) != 0:
                        prNow = pn[0].string
                    else:
                        prNow = 'NULL'
                    # Parameterized insert, so titles containing quotes cannot break the SQL
                    sql = 'insert into {} (id, title, prNow, link) values (%s, %s, %s, %s)'.format(tableName)
                    cursor.execute(sql, (count, title, prNow, link))
                    print('\rInfo: storing Amazon data, current id: {}'.format(count), end='')
                    count += 1
                conn.commit()  # commit after each results page
        except:
            traceback.print_exc()

class JDThread(threading.Thread):
    def __init__(self, keyword):
        threading.Thread.__init__(self)
        self.keyword = keyword

    def run(self):
        print('\nInfo: starting to scrape JD...')
        count = 1
        tableName = 'db_{}_jd'.format(self.keyword)
        try:
            print('\nInfo: creating the JD table...')
            cursor.execute('create table {} (id int, title text, prNow text, link text)'.format(tableName))
            print('\nInfo: scraping JD pages...')
            # JD has no total-page indicator here, so scan a fixed range of pages
            for i in range(1, 100):
                url = 'https://search.jd.com/Search?keyword={}&page={}'.format(self.keyword, i)
                soup = getSoupObject(url)
                lis = soup('li', {'class': 'gl-item'})
                for li in lis:
                    a = li.find_all('div', {'class': 'p-name'})
                    pn = li.find_all('div', {'class': 'p-price'})[0].find_all('i')
                    if len(a) != 0:
                        link = 'http:' + a[0].find_all('a')[0].attrs['href']
                        title = a[0].find_all('em')[0].get_text()
                    else:
                        link = 'NULL'
                        title = 'NULL'
                    if len(link) > 128:
                        link = 'TooLong'
                    if len(pn) != 0:
                        prNow = '¥' + pn[0].string
                    else:
                        prNow = 'NULL'
                    # Parameterized insert, so titles containing quotes cannot break the SQL
                    sql = 'insert into {} (id, title, prNow, link) values (%s, %s, %s, %s)'.format(tableName)
                    cursor.execute(sql, (count, title, prNow, link))
                    print('\rInfo: storing JD data, current id: {}'.format(count), end='')
                    count += 1
                conn.commit()  # commit after each results page
        except:
            traceback.print_exc()

def closeDB():
    global conn, cursor
    cursor.close()  # close the cursor before the connection
    conn.close()

def main():
    print('Note: before using this program, manually create an empty database named book and edit the connection settings at the top of the script')
    keyword = input('\nEnter the keyword to scrape: ')
    dangdangThread = DangDangThread(keyword)
    amazonThread = AmazonThread(keyword)
    jdThread = JDThread(keyword)
    dangdangThread.start()
    amazonThread.start()
    jdThread.start()
    dangdangThread.join()
    amazonThread.join()
    jdThread.join()
    closeDB()
    print('\nScraping finished, exiting....')
    os.system('pause')

main()

Sample screenshot:

Partial results for the keyword "Android", exported to Excel (screenshot not reproduced here).
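The export-to-Excel step shown in the screenshot is not part of the script itself. A minimal sketch of how such an export could be done with pandas; the table name db_Android_dangdang follows the script's naming scheme for the keyword "Android", and the pandas/openpyxl dependencies are assumptions:

import pandas as pd
import pymysql

# Hypothetical export: read one result table and write it to an .xlsx file
conn = pymysql.connect(host='127.0.0.1', port=3306, user='root',
                       passwd='root', db='book', charset='utf8')
df = pd.read_sql('SELECT * FROM db_Android_dangdang', conn)  # table name is an assumption
df.to_excel('dangdang_Android.xlsx', index=False)  # writing .xlsx requires openpyxl
conn.close()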

Summary

That is all for this code example of scraping book information from Dangdang, JD, and Amazon with Python; I hope it helps. Interested readers can also refer to these related articles on this site:

Python code for scraping Amazon book information

A Python spider example: scraping jokes from a website

Python code example: scraping product listings from e-commerce sites

If anything is missing, feel free to point it out in a comment. Thanks for your support!
