# -*- coding: utf-8 -*-
# Source: blog post "量化交易之python篇 - request - 网络爬虫(百度贴吧)"
# (scraped page chrome preserved as a comment: 0 点赞 / 收藏 / 分享 / 微信扫一扫)


import requests

class TiebaSpider:
    """Simple crawler that downloads Baidu Tieba forum listing pages.

    Builds the listing-page URLs for a given forum (``tieba_name``),
    fetches each page with ``requests``, and saves the raw HTML to a
    local file named "<forum>-第<page>页.html".
    """

    def __init__(self, tieba_name, pages=0):
        """
        :param tieba_name: forum name (the ``kw`` query parameter).
        :param pages: number of listing pages to fetch; 0 means none.
        """
        self.page = 0
        self.tieba_name = tieba_name
        # Seed url_name_model with the page-0 URL; get_url_list below
        # overwrites it again for every page it generates (original behavior).
        self.url_name_model = self.__get_url_name(self.tieba_name, self.page)
        self.url_list = self.get_url_list(tieba_name, pages)
        self.headers = {"User-Agent": 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
                                      'Chrome/85.0.4183.83 Safari/537.36'}

    def __get_url_name(self, tieba_name, page):
        """Return the listing URL for ``page`` (0-based) of the forum.

        Page 0 uses the "fr=ala0&tpl=5" entry URL; later pages use the
        paginated URL where ``pn`` advances by 50 per page.
        Also stores the result in ``self.url_name_model`` (kept for
        backward compatibility with the original implementation).
        """
        # Page 0 example: "https://tieba.baidu.com/f?kw=%B4%A9%D4%BD%BB%F0%CF%DF&fr=ala0&tpl=5"
        page_0 = "https://tieba.baidu.com/f?kw={}&fr=ala0&tpl=5".format(tieba_name)
        page_other = "https://tieba.baidu.com/f?kw={}&ie=utf-8&pn={}".format(tieba_name, page * 50)

        if 0 == page:
            self.url_name_model = page_0
        else:
            self.url_name_model = page_other

        return self.url_name_model

    # Build the list of listing-page URLs for this forum.
    def get_url_list(self, tieba_name, pages):
        """Return URLs for pages 0..pages-1; negative ``pages`` is clamped to 0."""
        new_list = []

        if pages < 0:
            # BUGFIX: the guard rejects *negative* values, but the original
            # message claimed "pages cannot be 0" — corrected to match.
            print(tieba_name + "(pages) 不能是负数.")
            pages = 0

        for i in range(pages):
            new_list.append(self.__get_url_name(tieba_name, i))

        return new_list

    # Send the request and return the decoded response body.
    def parse_url(self, url):
        """Fetch ``url`` and return its body decoded as text (UTF-8 default)."""
        response = requests.get(url, headers=self.headers)
        return response.content.decode()

    # Save an HTML string to a local file.
    def save_html_string(self, html_string, page_number):
        """Write ``html_string`` to "<forum>-第<page_number>页.html" (UTF-8)."""
        file_path = "{}-第{}页.html".format(self.tieba_name, page_number)
        with open(file_path, "w", encoding="UTF-8") as file:
            file.write(html_string)

    def run(self):
        """Fetch every URL in ``self.url_list`` and save each page to disk."""
        # BUGFIX: the original used self.url_list.index(url_name), which is
        # O(n) per iteration and yields wrong page numbers if two URLs are
        # equal; enumerate gives the correct 1-based page number directly.
        for page_number, url_name in enumerate(self.url_list, start=1):
            html_string = self.parse_url(url=url_name)
            self.save_html_string(html_string=html_string, page_number=page_number)


if __name__ == '__main__':
    # Example run against the "地下城与勇士" forum; the commented line below
    # shows an alternative forum/page-count configuration.
    # tieba_spider = TiebaSpider(tieba_name="穿越火线", pages=5)
    spider = TiebaSpider(tieba_name="地下城与勇士", pages=7)
    spider.run()

# (scraped page footer preserved as a comment: 举报 / 相关推荐 / 0 条评论)