Crawler Tutorial Series 5: Analyzing a Dynamic Page's API to Scrape PDFs from Dropbox

The script below crawls the schedule table of the CS598 course page: it creates one MMDD folder per lecture, downloads the reading PDFs (arXiv and OpenReview links) directly, and downloads the Dropbox-hosted slides by replaying the `fetch_user_content_link` API call that the Dropbox share page itself makes, using cookies and a CSRF token captured from the browser.

```python
import os

import requests
from bs4 import BeautifulSoup

# SOCKS5 proxy to speed things up (requests needs the requests[socks] extra for this)
proxies = {
    "http": "socks5://127.0.0.1:10808",
    "https": "socks5://127.0.0.1:10808",
}

# Dropbox session values copied from a logged-in browser via the developer
# tools; the fetch_user_content_link call below rejects requests without them.
cookies = {
    '__Host-ss': 'bcD4Chza3M',
    'locale': 'zh_CN',
    'gvc': 'MTQxMzI3NDU0NjU2NzAyODExNDM4MzQ3NTk2NDExMjgyNjc2MzI2',
    't': '-VB7vYgNnBuMG3LhS_GfEzTL',
    '__Host-js_csrf': '-VB7vYgNnBuMG3LhS_GfEzTL',
    'seen-sl-signup-modal': 'VHJ1ZQ%3D%3D',
    'seen-sl-download-modal': 'VHJ1ZQ%3D%3D',
}


def download_readings(readings, month_day):
    """Download the reading PDFs (arXiv / OpenReview links) of one lecture."""
    readings = BeautifulSoup(str(readings[0]), 'html.parser')
    for link in readings.find_all('a'):
        pdf_link = link.get('href')
        if 'http' not in pdf_link:
            break
        elif 'openreview' in pdf_link:
            # the OpenReview forum page and its pdf share the same id
            pdf_link = pdf_link.replace('forum', 'pdf')
        elif 'arxiv' in pdf_link and 'pdf' not in pdf_link:
            # appending .pdf to an arXiv abstract URL yields the pdf
            pdf_link = pdf_link + '.pdf'
        print(pdf_link)
        pdf_name = pdf_link.split('/')[-1]
        if '.pdf' not in pdf_name:
            pdf_name = pdf_name.split('=')[-1] + '.pdf'
        print(pdf_name)
        pdf_data = requests.get(pdf_link, proxies=proxies)
        with open(month_day + '/' + pdf_name, 'wb') as f:
            f.write(pdf_data.content)


def download_slides(slides, month_day, k):
    """Download the Dropbox-hosted slide PDFs of one lecture."""
    slides = BeautifulSoup(str(slides), 'html.parser')
    for link in slides.find_all('a'):
        pdf_link = link.get('href')
        print(pdf_link)
        # k == 9: the slides sit on Google Drive, which we cannot access;
        # k == 10: the file no longer exists
        if 'dropbox' in pdf_link and k != 9 and k != 10:
            # step 1: replay the page's API call to resolve the shared link
            # into a direct user-content URL
            url = 'https://www.dropbox.com/sharing/fetch_user_content_link'
            data = {
                'is_xhr': 'true',
                't': '-VB7vYgNnBuMG3LhS_GfEzTL',  # must match the csrf cookie
                'url': pdf_link,
            }
            slide_data = requests.post(url, data=data,
                                       proxies=proxies, cookies=cookies)
            # the response body is the direct URL; drop its query string
            middle_url = slide_data.text.split('?')[0]
            print(middle_url)
            # step 2: fetch the pdf itself, sending the parameters recorded
            # from the browser's download request
            data_2 = {
                '_download_id': '013885563736029338651059959499724834269999834692877836324471532568',
                '_notify_domain': 'www.dropbox.com',
                'dl': '1',
            }
            pdf_data = requests.get(middle_url, params=data_2, proxies=proxies)
            pdf_name = pdf_link.split('/')[-1].split('?')[0]
            print(pdf_name)
            with open(month_day + '/' + pdf_name, 'wb') as f:
                f.write(pdf_data.content)


# fetch the course page and walk its schedule table row by row
data = requests.get("https://aisecure.github.io/TEACHING/cs598.html")
soup = BeautifulSoup(data.content, 'html.parser')
entries = list(soup.find_all("tr"))

for k, entry in enumerate(entries[1:]):  # entries[0] is the header row
    entry_data = BeautifulSoup(str(entry), 'html.parser')
    date_and_slides = entry_data.find_all(class_="tg-0pky")  # date cell (+ slides cell)
    readings = entry_data.find_all(class_="tg-reading")      # readings cell
    if date_and_slides != []:
        # build an MMDD folder name from the "month/day" text of the date cell
        date_list = date_and_slides[0].string.split('/')
        print(date_list)
        month_day = date_list[0].zfill(2) + date_list[1].zfill(2)
        print(month_day)
        if not os.path.exists(month_day):
            os.mkdir(month_day)
        if readings != []:
            download_readings(readings, month_day)
        # only rows with a second tg-0pky cell carry a slides column
        if len(date_and_slides) == 2:
            download_slides(date_and_slides[1], month_day, k)
```
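The API replay above depends on session cookies and a CSRF token copied from the browser, which expire sooner or later. For ordinary public shared links, Dropbox also supports a much simpler route: request the link with the query parameter `dl=1`, and the response body is the raw file rather than the preview page. Here is a minimal sketch of that shortcut, assuming the slide links are public `https://www.dropbox.com/s/...` shared links (the helper name and example link are made up for illustration):

```python
import requests

def download_dropbox_pdf(shared_link, out_path, proxies=None):
    """Fetch a public Dropbox shared link by forcing dl=1.

    A sketch, not the method used above: it assumes the link is
    public and needs no session cookies or CSRF token.
    """
    # dl=1 tells Dropbox to serve the raw file instead of the preview page
    direct_link = shared_link.split('?')[0] + '?dl=1'
    resp = requests.get(direct_link, proxies=proxies)
    resp.raise_for_status()
    with open(out_path, 'wb') as f:
        f.write(resp.content)

# hypothetical usage:
# download_dropbox_pdf('https://www.dropbox.com/s/abc123/lecture1.pdf?dl=0',
#                      '0124/lecture1.pdf')
```

If this works for the links on the course page, the whole cookie and CSRF setup becomes unnecessary; the API-replay approach remains useful for endpoints that offer no such shortcut.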
One more note: this site's editor is really terrible. Pasting a Markdown document in directly is still broken, and a year after the previous article the problem still hasn't been fixed, so from now on this column will be published on my blog instead.