[Tool] JS script to download Baidu Wenku documents as Word text + Python scraper for Baidu Wenku
This article, collected and organized by 生活随笔, introduces a JS script that downloads Baidu Wenku documents as Word text, plus a Python scraper for Baidu Wenku. It seemed quite useful, so it is shared here as a reference.
JS script
Paste the snippet below into the browser's developer-tools console on the document page and press Enter to run it.
// 1. Collect the text: walk every .reader-word-layer span and insert a line
//    break whenever the top offset changes (i.e. a new line in the document)
let topDiff = -1;
let content = "";
var filename = document.getElementsByClassName('doc-title')[0].innerText;
const target = document.querySelectorAll(".reader-word-layer");
target.forEach(x => {
    if (x.style.top !== topDiff) {
        content += "\n";
        topDiff = x.style.top;
    }
    content += x.innerText;
});

// 2. Create a temporary download link and save the text locally as a .doc file
var element = document.createElement('a');
element.setAttribute('href', 'data:text/plain;charset=utf-8,' + encodeURIComponent(content));
element.setAttribute('download', filename + ".doc");
element.style.display = "none";  // keep the temporary link invisible
document.body.appendChild(element);
element.click();
document.body.removeChild(element);

I noted this script down a while ago from another article, but I no longer remember which one it was.
Python scraper
import os
import re
import json
import requests
from urllib.request import urlretrieve


class BaiduWk:
    def __init__(self):
        self.list_info = []
        self.session = requests.Session()
        self.headers = {
            'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) '
                          'AppleWebKit/537.36 (KHTML, like Gecko) '
                          'Chrome/80.0.3987.87 Mobile Safari/537.36'
        }

    # Fetch the page source
    def get_html(self, start_url):
        response = self.session.get(start_url, headers=self.headers)
        response.encoding = response.apparent_encoding
        return response.text

    # Get the document title and extract the request parameters
    def parse_html(self, data):
        re_title = re.findall("'title': '(.*?)',", data)
        title = re_title[0] if re_title else re.findall('<title>(.*?)</title>', data)[0]
        params = {
            'bucketNum': re.findall(r'"bucketNum":(\d+)', data)[0],
            'md5sum': re.findall('md5sum=(.*?)&', data)[0],
            'sign': re.findall('sign=(.*?)&', data)[0],
            'rtcs_flag': re.findall('rtcs_flag=(.*?)&', data)[0],
            'rtcs_ver': re.findall('rtcs_ver=(.*?)&', data)[0],
            'rsign': re.findall('"rsign":"(.*?)"', data)[0],
        }
        # Extract the per-page "range" list
        page_range = re.findall(r'{"page":\d+,"range":"(.*?)"}', data)
        return params, page_range, title

    # Request each page in turn
    def words_data(self, params, page_range):
        pages = len(page_range) + 1
        url = r'https://wkrtcs.bdimg.com/rtcs/webapp'
        for i in range(1, pages):
            print(f'正在解析第{i}頁數據,飛速讀取中...')
            # Add the page number and range required by this request
            params['pn'] = i
            params['range'] = page_range[i - 1]
            response = self.session.get(url, params=params).text
            yield response

    # Parse the document text
    def get_words(self, response):
        pages = 1
        for data in response:
            # Strip the wrapper and parse the payload as JSON
            a = data[5:-1]
            text = ''
            d = json.loads(a)
            # Pull the text out of the nested 'c' keys
            for j in d['document.xml']:
                for c in j['c']:
                    text += '\n'
                    for c2 in c['c']:
                        try:
                            text += c2['c'] + '\n'
                        except Exception:
                            continue
            text += f'\n------------------------當前第{pages}頁-------------------------\n'
            pages += 1
            self.list_info.append(text)

    # Save the collected text to a file
    def save_info(self, title, path):
        os.makedirs('百度文庫', exist_ok=True)
        with open(path, 'w', encoding='utf-8') as f:
            f.writelines(self.list_info)

    # Get the total page count, title and docId for an image (ppt-type) document
    def get_img(self, start_url):
        print('開始嘗試解析百度文庫圖片...\n')
        r = self.session.get(start_url)
        r.encoding = r.apparent_encoding
        title = re.findall("'title': '(.*?)'", r.text)[0]
        print(title)
        docId = re.findall("'docId': '(.*?)'", r.text)[0]
        totalPageNum = re.findall("'totalPageNum': '(.*?)'", r.text)[0]
        totalPageNum = int(totalPageNum) + 1
        return totalPageNum, title, docId

    # Download every page image
    def download_img(self, totalPageNum, title, docId):
        for pn in range(1, totalPageNum):
            params = {
                'doc_id': docId,
                'pn': pn,
                'rn': 1,
                'type': 'ppt',
            }
            api_url = 'https://wenku.baidu.com/browse/getrequest'
            r = self.session.get(api_url, params=params, headers=self.headers)
            src = r.json()[0].get('zoom')
            os.makedirs(title, exist_ok=True)
            path = title + '/' + str(pn) + '.jpg'
            urlretrieve(src, path)
            print(f'正在提取第{pn}頁,請稍等...')

    # De-duplicate repeated lines while keeping their original order
    def set_word(self, path):
        word_set = list()
        with open(path, 'r', encoding='utf-8') as f:
            for each_line in f:
                word_set.append(each_line)
        result = list(set(word_set))
        result.sort(key=word_set.index)
        with open(path, 'w', encoding='utf-8') as f:
            f.writelines(result)
        print('done')

    # Entry point: download the text content
    def run_word(self):
        print('開始嘗試解析百度文庫頁面...\n')
        start_url = input('輸入百度文庫中的連接:')
        print('running...\n')
        start_url = re.sub('wenku', 'wk', start_url)
        html = self.get_html(start_url)
        param, ranges, title = self.parse_html(html)
        print(f'當前文章:{title}\n')
        path = '百度文庫/' + title + '.doc'
        response = self.words_data(param, ranges)
        self.get_words(response)
        self.save_info(title, path)
        self.set_word(path)
        print('done!!!')
        print('程序執行完畢!')

    # Entry point: download the page images
    def run_img(self):
        print('開始嘗試解析百度文庫圖片信息...\n')
        start_url = input('輸入百度文庫中的連接:')
        print('running...\n')
        totalPageNum, title, docId = self.get_img(start_url)
        self.download_img(totalPageNum, title, docId)
        print('done!!!')
        print('程序執行完畢!')


if __name__ == '__main__':
    wk = BaiduWk()
    wk.run_word()
    # wk.run_img()

Just run it in PyCharm. This is also code I re-posted a long time ago; since I only kept the code itself, the link to the original article was not saved… but I used it quite often back then.
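As a side note, the set_word step relies on a small order-preserving de-duplication trick: converting the lines to a set drops duplicates, and re-sorting by each line's original index restores the reading order. A minimal standalone sketch of that trick, with sample lines made up purely for illustration:

# Order-preserving de-duplication, as used in set_word above.
# The sample lines here are hypothetical.
lines = ['标题\n', '第一段\n', '第一段\n', '第二段\n', '标题\n']

unique = list(set(lines))      # drop duplicates (order is lost at this point)
unique.sort(key=lines.index)   # restore the original reading order

print(unique)                  # ['标题\n', '第一段\n', '第二段\n']

Because lines.index is called for every element, this is quadratic in the number of lines, which is acceptable for a single document but worth knowing about for very large files.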
Summary
The above is the full content of "[Tool] JS script to download Baidu Wenku documents as Word text + Python scraper for Baidu Wenku" collected and organized by 生活随笔; hopefully it helps you solve the problem you ran into.