日韩性视频-久久久蜜桃-www中文字幕-在线中文字幕av-亚洲欧美一区二区三区四区-撸久久-香蕉视频一区-久久无码精品丰满人妻-国产高潮av-激情福利社-日韩av网址大全-国产精品久久999-日本五十路在线-性欧美在线-久久99精品波多结衣一区-男女午夜免费视频-黑人极品ⅴideos精品欧美棵-人人妻人人澡人人爽精品欧美一区-日韩一区在线看-欧美a级在线免费观看

歡迎訪問 生活随笔!

生活随笔

當前位置: 首頁 > 编程语言 > python >内容正文

python

python获取网页数据对电脑性能_【Python】网页数据爬取实战

發布時間:2025/3/8 python 51 豆豆
生活随笔 收集整理的這篇文章主要介紹了 python获取网页数据对电脑性能_【Python】网页数据爬取实战 小編覺得挺不錯的,現在分享給大家,幫大家做個參考.

由于網頁結構跟之前有變化,還不是很熟悉。代碼待完善,問題記錄:

騰訊新聞二級網頁內容爬取有問題。

鏈家網站頭文件沒有用到。

爬取一條騰訊視頻的header內容,存入txt。要求:

包含網頁鏈接

包含title

包含所有headers信息

import requests
from bs4 import BeautifulSoup

# Target article; its HTTP response headers and <title> are saved to a text file.
u = 'http://news.qq.com/a/20170205/023923.htm'
r = requests.get(url=u)
headers = r.headers  # HTTP response headers of the page
soup = BeautifulSoup(r.text, 'lxml')
title = soup.title.text  # text of the page's <title> element

# BUG FIX: 'with' guarantees the file is closed even if a write raises;
# mode 'w' already truncates, so the original's f.seek(0) was redundant.
with open('C:\\Users\\Administrator\\Desktop\\lianxi\\header.txt', 'w', encoding='utf8') as f:
    f.write('爬取網頁:' + str(u) + '\n')
    f.write('新聞標題為:' + title + '\n')
    # One "name:value" line per response header.
    for name in headers:
        f.writelines([name, ':', headers[name], '\n'])

print('finished!')

爬取騰訊新聞網站上,某一天的某類新聞標題。要求:

開頭:‘XX年XX月XX日騰訊新聞’

包括新聞標題和網址

(爬取每一條新聞的內容(二級標題))

import requests
from bs4 import BeautifulSoup
import re

# World-news list page; article links on it match the dated path below.
u = 'http://news.qq.com/world_index.shtml'
r = requests.get(url=u)
soup = BeautifulSoup(r.text, 'lxml')

# BUG FIX: 'with' closes the file even on error; mode 'w' truncates,
# making the original's f.seek(0) unnecessary.
with open('C:\\Users\\Administrator\\Desktop\\lianxi\\news.txt', 'w', encoding='utf8') as f:
    f.write('2018年8月26日騰訊新聞\n')
    news = soup.find_all('a', href=re.compile('http://news.qq.com/a/20180825/'))
    for link in news:
        txt = link.text.strip()  # strip() drops surrounding whitespace
        if txt == '':
            continue  # skip anchors with no visible text (e.g. image links)
        # BUG FIX: the original omitted the trailing '\n', so every entry
        # ran together on one line (the revised version below adds it).
        f.writelines([txt, ',', 'url=', link.attrs['href'], '\n'])

print('finished!')

修改

import requests
from bs4 import BeautifulSoup
import re

u = 'http://news.qq.com/world_index.shtml'
r = requests.get(url=u)
soup = BeautifulSoup(r.text, 'lxml')

# BUG FIX: 'with' guarantees the file handle is released even if a write
# raises; mode 'w' truncates, so no explicit seek(0) is needed.
with open('C:\\Users\\Administrator\\Desktop\\lianxi\\news.txt', 'w', encoding='utf8') as f:
    f.write('2018年8月26日騰訊新聞\n')
    # Article hrefs here are protocol-relative (//new.qq.com/...),
    # hence the 'http:' prefix prepended when writing the URL out.
    news = soup.find_all('a', href=re.compile('//new.qq.com/omn/20180826'))
    for link in news:
        txt = link.text.strip()  # strip() drops surrounding whitespace
        if txt == '':
            continue  # skip anchors with no visible text
        f.writelines([txt, ',', 'url=', 'http:', link.attrs['href'], '\n'])

print('finished!')

添加正文內容:

import requests
from bs4 import BeautifulSoup
import re

# Crawl the world-news list page, then fetch each article (second-level
# page) and save its title and body paragraphs.
u = 'http://news.qq.com/world_index.shtml'
r = requests.get(url=u)
soup = BeautifulSoup(r.text, 'lxml')

# 'with' closes the file even on error; 'w' truncates, so no seek(0).
with open('C:\\Users\\Administrator\\Desktop\\lianxi\\news2.txt', 'w', encoding='utf8') as f:
    f.write('2018年8月26日騰訊新聞\n')
    news = soup.find_all('a', href=re.compile('http://news.qq.com/a/2018'))
    for link in news:
        txt = link.text.strip()  # strip() drops surrounding whitespace
        if txt == '':
            continue  # skip anchors with no visible text
        ul = link.attrs['href']
        ur = requests.get(url=ul)
        usoup = BeautifulSoup(ur.text, 'lxml')
        # BUG FIX: the original wrote attrs[id] — `id` is an undefined name
        # (the builtin function, not a string) — and would also KeyError when
        # <body> carries no id attribute; .get('id') handles both safely.
        # Checked BEFORE writing, so skipped picture-news pages no longer
        # leave a dangling title + '正文如下:' header in the file.
        if usoup.body.attrs.get('id') == 'P-QQ':  # picture-news pages have body id P-QQ
            continue
        f.write(txt + '\n')
        f.write('正文如下:\n')
        # BUG FIX: the original reused `i` for this inner loop, clobbering
        # the outer loop variable.
        paragraphs = usoup.find('div', id="Cnt-Main-Article-QQ").find_all('p')
        for p in paragraphs:
            print(p.text)
            f.write(p.text + '\n')
        f.write('\n')

print('finished!')

爬蟲正確的習慣和邏輯

函數式爬取

用瀏覽器去訪問,headers信息

r = requests.get(url='...',headers={...})以瀏覽器的形式向網頁進行請求

頭部信息

# Interactive helper: paste browser request headers ("Key: Value" per line)
# and print them converted to a dict.
headers = input("粘貼頭部信息:")
lst = headers.split('\n')
m = []
for line in lst:
    # BUG FIX: the original split on every ':', mangling values that contain
    # colons (URLs, ports) and raising IndexError on colon-less/blank lines.
    # partition() splits at most once at the first ':'.
    key, sep, value = line.partition(':')
    if not sep:
        continue  # line has no colon — not a header, skip it
    # strip() removes the space conventionally following the colon.
    m.append([key.strip(), value.strip()])
print(dict(m))

def header_format(h):
    """
    Convert a raw headers string (one "Key: Value" per line) into a dict.

    h: pasted headers text. Lines without a colon are skipped.
    Returns a dict mapping stripped header names to stripped values.
    """
    # BUG FIX: the original ignored its parameter and re-read from input()
    # inside the function, defeating the point of passing `h` in.
    m = []
    for line in h.split('\n'):
        # BUG FIX: partition() splits only at the FIRST ':' so values that
        # themselves contain colons (URLs, port numbers) stay intact;
        # the original split(':') also raised IndexError on colon-less lines.
        key, sep, value = line.partition(':')
        if not sep:
            continue  # no colon — not a "Key: Value" line
        m.append([key.strip(), value.strip()])
    return dict(m)

# Demo call: `headers` is the raw string pasted via input() in the snippet above.
print(header_format(headers))

用函數式寫法的優點:

閱讀性更強

函數的可復制性

便于修改

爬取一條騰訊視頻的header內容,存入txt。函數式編寫:

包含網頁鏈接

包含title

包含所有headers信息

爬取鏈家二手房數據-深圳

import requests

from bs4 import BeautifulSoup

import re

def url_analysis(u, h, s, n):
    '''
    Analyse the paginated listing pages and collect second-level (detail) URLs.

    u: base listing URL
    h: request headers dict (browser headers + cookie)
    s: pattern the detail-page hrefs must match
    n: number of list pages to crawl
    Returns a list of matching href strings.
    '''
    url_lst = []
    for i in range(1, n + 1):
        # Page 1 has no 'pg' segment in its URL; subsequent pages do.
        if i == 1:
            page_url = u + 'nb1rs深圳/'
        else:
            page_url = u + 'pg' + str(i) + 'nb1rs深圳/'
        # BUG FIX: the headers parameter was accepted but never sent (the
        # article itself notes "鏈家網站頭文件沒有用到"); sending it makes the
        # request look like a real browser session.
        r = requests.get(url=page_url, headers=h)
        soup = BeautifulSoup(r.text, 'lxml')
        for tag in soup.find_all('a', href=re.compile(s)):
            url_lst.append(tag.attrs['href'])
    return url_lst

def content(u, h):
    '''
    Scrape one listing detail page and return one CSV-style row of fields.

    u: detail-page URL
    h: request headers dict
    Returns a list of strings ready to pass to f.writelines().
    '''
    # BUG FIX: headers were accepted but never sent with the request.
    r = requests.get(url=u, headers=h)
    # BUG FIX: the original assigned r.encodinge (typo), which only created a
    # junk attribute; r.encoding is what actually controls text decoding.
    r.encoding = 'utf-8'
    soup = BeautifulSoup(r.text, 'lxml')
    t = soup.title.text  # page title
    toprice = soup.find('div', class_='price').find('span', class_='total').text
    unprice = soup.find('div', class_='unitPrice').find('span', class_='unitPriceValue').text
    area = soup.find('div', class_='area').find('div', class_='mainInfo').text
    base = soup.find('div', class_='base').find('div', class_='content').find_all('li')
    year = base[-1].text  # last <li> holds the property-rights years
    # Coordinates are embedded in an inline script as resblockPosition:'lng,lat'
    pattern = 'resblockPosition:\'(.*?)\','  # .*? = non-greedy match of any chars
    position = re.search(pattern, r.text).group(1)
    lng = position.split(',')[0]
    lat = position.split(',')[1]
    return [t, ',', toprice, ',', unprice, ',', area, ',', year, ',', lng, ',', lat, '\n']

if __name__ == '__main__':  # entry point when run as a script
    web_u = 'https://sz.lianjia.com/ershoufang/'
    # Browser-captured request headers (including the session cookie) so the
    # site serves a normal page instead of blocking the crawler.
    web_h = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
        'Accept-Encoding': 'gzip, deflate, br',
        'Accept-Language': 'zh-CN,zh;q=0.9',
        'Connection': 'keep-alive',
        'Cookie': 'TY_SESSION_ID=93f5b43a-5dc9-4d96-b57a-a4eb78f8dc15; lianjia_uuid=614ed9e0-dc25-421f-ba8b-141c574dbb47; _smt_uid=5b80defd.8430805; UM_distinctid=1656f670d3e4ff-02814a7ed21053-b34356b-1fa400-1656f670d3fdd7; _jzqx=1.1535172349.1535172349.1.jzqsr=bj%2Elianjia%2Ecom|jzqct=/.-; _ga=GA1.2.50227061.1535172352; ljref=pc_sem_baidu_ppzq_x; lianjia_ssid=dbe87b29-353a-45c2-97cf-aae666e2771b; Hm_lvt_9152f8221cb6243a53c83b956842be8a=1535172349,1535201139,1535358484; _jzqa=1.3976151446564617700.1535172349.1535201139.1535358484.3; _jzqc=1; _jzqy=1.1535201139.1535358484.1.jzqsr=baidu|jzqct=%E9%93%BE%E5%AE%B6%E7%BD%91.-; _jzqckmp=1; _gid=GA1.2.1182771159.1535358486; select_city=440300; all-lj=c32edd623b8a5a59c7de54c92107bb6c; _qzjc=1; CNZZDATA1255849469=275538323-1535355329-%7C1535355329; CNZZDATA1254525948=1806440598-1535354494-%7C1535354494; CNZZDATA1255633284=72361912-1535358081-%7C1535358081; CNZZDATA1255604082=1229464985-1535356409-%7C1535356409; Hm_lpvt_9152f8221cb6243a53c83b956842be8a=1535359605; _qzja=1.1736056849.1535358739249.1535358739249.1535358739249.1535359600160.1535359605575.0.0.0.10.1; _qzjb=1.1535358739249.10.0.0.0; _qzjto=10.1.0; _jzqb=1.15.10.1535358484.1',
        'Host': 'sz.lianjia.com',
        'Referer': 'https',
        'Upgrade-Insecure-Requests': '1',
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'
    }
    web_s = 'https://sz.lianjia.com/ershoufang/105'  # detail-page hrefs start with this
    web_n = 3  # number of list pages to crawl
    # BUG FIX: 'with' + explicit utf8 — the original never closed the file on
    # error and relied on the platform default encoding, unlike every other
    # open() in this article which passes encoding='utf8'.
    with open('C:\\Users\\Administrator\\Desktop\\lianxi\\lianjia.txt', 'w', encoding='utf8') as f:
        f.write('title,total_price萬元,unprice元/平方米,area平方米,產權年限,lng,lat\n')
        for detail_url in url_analysis(web_u, web_h, web_s, web_n):
            data = content(detail_url, web_h)
            f.writelines(data)
            print(data)
    print('finished!')

總結

以上是生活随笔為你收集整理的python获取网页数据对电脑性能_【Python】网页数据爬取实战的全部內容,希望文章能夠幫你解決所遇到的問題。

如果覺得生活随笔網站內容還不錯,歡迎將生活随笔推薦給好友。