日韩性视频-久久久蜜桃-www中文字幕-在线中文字幕av-亚洲欧美一区二区三区四区-撸久久-香蕉视频一区-久久无码精品丰满人妻-国产高潮av-激情福利社-日韩av网址大全-国产精品久久999-日本五十路在线-性欧美在线-久久99精品波多结衣一区-男女午夜免费视频-黑人极品ⅴideos精品欧美棵-人人妻人人澡人人爽精品欧美一区-日韩一区在线看-欧美a级在线免费观看

歡迎訪問 生活随笔!

生活随笔

當(dāng)前位置: 首頁 > 编程语言 > python >内容正文

python

python爬虫科研数据提取_python爬虫数据提取四之pyquery

發(fā)布時間:2024/10/14 python 76 豆豆
生活随笔 收集整理的這篇文章主要介紹了 python爬虫科研数据提取_python爬虫数据提取四之pyquery 小編覺得挺不錯的,現(xiàn)在分享給大家,幫大家做個參考.

1 pyquery

簡介:同樣是一個強(qiáng)大的網(wǎng)頁解析工具 它提供了和jQuery類似的語法來解析HTML文檔,支持CSS選擇器,使用非常方便

2 pyquery基本用法

2.1 安裝

pip install pyquery

2.2 初始化

字符串初始化

html = '''

  • first item
  • second item
  • third item
  • fourth item
  • fifth item

'''

from pyquery import PyQuery as pq

doc = pq(html)

print(doc('li'))

first itemsecond itemthird itemfourth itemfifth item

URL初始化

from pyquery import PyQuery as pq

doc = pq(url='http://www.baidu.com')

print(doc('head'))

百度一下,你就知道 （原文此處為亂碼，係 UTF-8 頁面標(biāo)題被按 Latin-1 誤解碼所致）

文件初始化

from pyquery import PyQuery as pq

doc = pq(filename='test.html')

print(doc('li'))

11111111112222222222333333333344444444445555555555

2.3 基本CSS選擇器

html = '''

  • first item
  • second item
  • third item
  • fourth item
  • fifth item

'''

from pyquery import PyQuery as pq

doc = pq(html)

print(doc('#container .list li'))

first itemsecond itemthird itemfourth itemfifth item

2.4 查找元素

2.4.1 子元素

html = '''

  • first item
  • second item
  • third item
  • fourth item
  • fifth item

'''

from pyquery import PyQuery as pq

doc = pq(html)

items = doc('.list')

print(type(items))

print(items)

lis = items.find('li')

print(type(lis))

print(lis)

  • first item
  • second item
  • third item
  • fourth item
  • fifth item
first itemsecond itemthird itemfourth itemfifth item

lis = items.children()

print(type(lis))

print(lis)

first itemsecond itemthird itemfourth itemfifth item

lis = items.children('.active')

print(lis)

third itemfourth item

注意:這里的item-0和active是兩個類,在html中可以給同一元素設(shè)置兩個類,中間用空格隔開

2.4.2 父元素

html = '''

  • first item
  • second item
  • third item
  • fourth item
  • fifth item

'''

from pyquery import PyQuery as pq

doc = pq(html)

items = doc('.list')

container = items.parent()

print(type(container))

print(container)

  • first item
  • second item
  • third item
  • fourth item
  • fifth item

html = '''

  • first item
  • second item
  • third item
  • fourth item
  • fifth item

'''

from pyquery import PyQuery as pq

doc = pq(html)

items = doc('.list')

parents = items.parents()

print(type(parents))

print(parents)

  • first item
  • second item
  • third item
  • fourth item
  • fifth item
  • first item
  • second item
  • third item
  • fourth item
  • fifth item

parent = items.parents('.wrap')

print(parent)

  • first item
  • second item
  • third item
  • fourth item
  • fifth item

2.4.3 兄弟元素

html = '''

  • first item
  • second item
  • third item
  • fourth item
  • fifth item

'''

from pyquery import PyQuery as pq

doc = pq(html)

li = doc('.list .item-0.active')

print(li.siblings())

second itemfirst itemfourth itemfifth item

可以看到,結(jié)果的順序并不是完全和輸入的順序一致

html = '''

  • first item
  • second item
  • third item
  • fourth item
  • fifth item

'''

from pyquery import PyQuery as pq

doc = pq(html)

li = doc('.list .item-0.active') # 表示class為list的元素下的元素,這個元素的class為item-0和active

print(li.siblings('.active'))

fourth item

2.5 遍歷

單個元素

html = '''

  • first item
  • second item
  • third item
  • fourth item
  • fifth item

'''

from pyquery import PyQuery as pq

doc = pq(html)

li = doc('.item-0.active')

print(li)

third item

多個元素

html = '''

  • first item
  • second item
  • third item
  • fourth item
  • fifth item

'''

from pyquery import PyQuery as pq

doc = pq(html)

lis = doc('li').items()

print(type(lis))

for li in lis:

print(li)

first itemsecond itemthird itemfourth itemfifth item

2.6 獲取信息

獲取屬性

html = '''

  • first item
  • second item
  • third item
  • fourth item
  • fifth item

'''

from pyquery import PyQuery as pq

doc = pq(html)

a = doc('.item-0.active a')

print(a)

print(a.attr('href'))

print(a.attr.href)

third item

link3.html

link3.html

獲取文本

html = '''

  • first item
  • second item
  • third item
  • fourth item
  • fifth item

'''

from pyquery import PyQuery as pq

doc = pq(html)

a = doc('.item-0.active a')

print(a)

print(a.text())

third item

third item

獲取HTML

html = '''

  • first item
  • second item
  • third item
  • fourth item
  • fifth item

'''

from pyquery import PyQuery as pq

doc = pq(html)

li = doc('.item-0.active')

print(li)

print(li.html())

third item

third item

2.7 DOM操作

addClass、removeClass

html = '''

  • first item
  • second item
  • third item
  • fourth item
  • fifth item

'''

from pyquery import PyQuery as pq

doc = pq(html)

li = doc('.item-0.active')

print(li)

li.removeClass('active')

print(li)

li.addClass('active')

print(li)

third itemthird itemthird item

attr、css

html = '''

  • first item
  • second item
  • third item
  • fourth item
  • fifth item

'''

from pyquery import PyQuery as pq

doc = pq(html)

li = doc('.item-0.active')

print(li)

li.attr('name', 'link')

print(li)

li.css('font-size', '14px')

print(li)

third itemthird itemthird item

remove

html = '''

Hello, World

This is a paragraph.

'''

from pyquery import PyQuery as pq

doc = pq(html)

wrap = doc('.wrap')

print(wrap.text())

wrap.find('p').remove()

print(wrap.text())

Hello, World

This is a paragraph.

Hello, World

2.8 偽類選擇器

html = '''

  • first item
  • second item
  • third item
  • fourth item
  • fifth item

'''

from pyquery import PyQuery as pq

doc = pq(html)

li = doc('li:first-child')

print(li)

li = doc('li:last-child')

print(li)

li = doc('li:nth-child(2)')

print(li)

li = doc('li:gt(2)')# index > 2 的所有子元素,從0開始

print(li)

li = doc('li:nth-child(2n)')

print(li)

li = doc('li:contains(second)') # 根據(jù)文本選擇

print(li)

first itemfifth itemsecond itemfourth itemfifth itemsecond itemfourth itemsecond item

3 實(shí)戰(zhàn)---爬取百度校花吧(pyquery版)

import requests

from lxml import etree

import os

from pyquery import PyQuery as pq

class Baidu(object):
    """Crawler that downloads every image posted in a Baidu Tieba forum.

    Walks the forum's thread-list pages one by one, opens each thread's
    detail page, extracts the image URLs and saves the image files under
    a local ``images1`` directory.
    """

    def __init__(self, name):
        # List-page URL of the forum called *name*.
        self.url = 'http://tieba.baidu.com/f?ie=utf-8&kw={}'.format(name)
        # Deliberately ancient User-Agent: Tieba serves a legacy, non-JS
        # page layout to such browsers, which is far easier to parse.
        self.headers = {
            'User-Agent': 'Mozilla/4.0 (compatible; MSIE 5.01; Windows NT 5.0) '
        }

    def get_data(self, url):
        """Send a GET request to *url*; return the raw body as bytes."""
        response = requests.get(url, headers=self.headers)
        return response.content

    def parse_list_page(self, data):
        """Parse one thread-list page.

        Returns ``(data_list, next_url)`` where *data_list* holds one
        ``{'url': ..., 'title': ...}`` dict per thread and *next_url* is
        the absolute URL of the next list page, or ``None`` on the last
        page (which makes the caller's ``while next_url`` loop stop).
        """
        # Keep a copy of the raw page for offline debugging.
        with open('baidu1.html', 'wb') as f:
            f.write(data)
        doc = pq(data)
        # Anchor element of every thread title on the list page.
        node_list = doc.find('.j_thread_list .threadlist_title a')
        data_list = []
        for node in node_list.items():
            temp = {}
            # href is relative ("/p/..."); prepend the site root.
            temp['url'] = 'http://tieba.baidu.com' + node.attr('href')
            temp['title'] = node.text()
            data_list.append(temp)
        # Pager link to the next page; the ".next" anchor is absent on
        # the last page, in which case attr() returns None — guard it,
        # otherwise 'http:' + None would raise TypeError.
        next_node = doc.find('#frs_list_pager .next').attr('href')
        next_url = 'http:' + next_node if next_node else None
        return data_list, next_url

    def parse_detail_page(self, data_list):
        """Extract and return the list of image URLs from a thread page."""
        doc = pq(data_list)
        # Tieba tags user-posted images with the BDE_Image CSS class.
        imagelst = doc.find('.BDE_Image').items()
        image_list = [img.attr('src') for img in imagelst]
        print(image_list)
        return image_list

    def download(self, image_list):
        """Download every URL in *image_list* into the images1 folder."""
        if not os.path.exists('images1'):
            os.makedirs('images1')
        for image in image_list:
            # os.sep keeps the path separator portable ('/' on mac/Linux,
            # '\\' on Windows).
            file_name = 'images1' + os.sep + image.split('/')[-1]
            image_data = self.get_data(image)
            with open(file_name, 'wb') as f:
                f.write(image_data)

    def run(self):
        """Crawl list pages until exhausted, downloading all images."""
        # Start from the forum's first list page.
        next_url = self.url
        while next_url:
            data = self.get_data(next_url)
            # Threads on this page, plus the following list page (or None).
            data_list, next_url = self.parse_list_page(data)
            for item in data_list:
                detail_html = self.get_data(item['url'])
                image_list = self.parse_detail_page(detail_html)
                self.download(image_list)

if __name__ == '__main__':
    # Entry point: crawl every image from the "校花吧" forum and store
    # the files in the local images1 directory.
    spider = Baidu('校花吧')
    spider.run()

總結(jié)

以上是生活随笔為你收集整理的python爬虫科研数据提取_python爬虫数据提取四之pyquery的全部內(nèi)容,希望文章能夠幫你解決所遇到的問題。

如果覺得生活随笔網(wǎng)站內(nèi)容還不錯,歡迎將生活随笔推薦給好友。