requests、bs4总结和作业
一、requests的用法
requests是Python用于网络(http)请求的第三方库,也是爬虫获取网络数据的重要工具
1. 向目标地址(网络地址)发送请求
requests.get(url,*,headers, proxies, timeout) - 以指定的方式给地址发送网络请求,返回值是服务器返回的响应对象
参数说明:
url - 字符串;请求的网络地址,可能是目标网站的网址也可能是数据接口
headers - 字典;请求头,用于伪装浏览器设置user-agent、完成自动登录设置cookie
proxies - 字典;设置代理ip(ip地址被封的时候使用)
timeout - 数字;设置超时时间
# Demo: send a GET request to a 51job search page with a browser User-Agent.
# BUGFIX: the original query string contained '°reefrom=99' — a mis-encoded
# '&degreefrom=99' (the '&deg' fragment was rendered as the '°' entity).
url = 'https://search.51job.com/list/000000,000000,0000,00,9,99,数据分析,2,1.html?lang=c&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&ord_field=0&dibiaoid=0&line=&welfare='
headers = {
    # Pretend to be a desktop Chrome browser so the site does not reject the bot.
    'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36'
}
response = requests.get(url, headers=headers)
print(response.headers)        # response headers sent back by the server
response.encoding = 'gbk'      # set the charset BEFORE reading .text
print(response.text)           # body decoded to str using the encoding above
print(response.content)        # raw body bytes (use for images/audio/files)
# .json() only works when the body is valid JSON; this URL returns HTML,
# so guard the call instead of letting the whole demo crash.
try:
    print(response.json())
except ValueError:
    print('response body is not valid JSON')
二、爬网页
# Demo: fetch the Beike (ke.com) Chengdu rental-listings page and print its HTML.
import requests

target = 'https://cd.zu.ke.com/zufang'
resp = requests.get(target)
print(resp.text)
三、下载图片
# Demo: download an image (the Baidu logo) and save the raw bytes to disk.
import requests

url = 'https://www.baidu.com/img/PCtm_d9c8750bed0b3c7d089fa7d55720d6cf.png'
response = requests.get(url)
# 'wb' because image data is binary; 'with' guarantees the file handle is
# closed (the original open(...).write(...) leaked the handle).
with open('files/a.png', 'wb') as f:
    f.write(response.content)
四、json接口数据
# Demo: hit the League of Legends hero-list JSON endpoint and download the
# champion-select audio clip for the first 10 heroes.
import requests
import os

url = 'https://game.gtimg.cn/images/lol/act/img/js/heroList/hero_list.js'
response = requests.get(url)
# enumerate replaces the hand-rolled counter; stop after 10 heroes.
for count, hero in enumerate(response.json()['hero'], start=1):
    print(hero['name'])
    a_url = hero['selectAudio']
    res = requests.get(a_url)
    # 'wb' — same mode as the original's unusual 'bw' spelling, normalized.
    # 'with' closes each file handle (the original leaked one per hero).
    with open(f'files/{os.path.basename(a_url)}', 'wb') as f:
        f.write(res.content)
    if count == 10:
        break
五、bs4的用法
from bs4 import BeautifulSoup
注意:安装第三方库的时候安装beautifulsoup4而不是bs4
# Demo: parse a local HTML file with BeautifulSoup and exercise CSS selectors.
# 'with' closes the file handle (the original open(...).read() leaked it).
with open('files/05css选择器(重要).html', encoding='utf-8') as f:
    html = f.read()
soup = BeautifulSoup(html, 'lxml')

# select(css) -> list of ALL matching tags
result = soup.select('p')          # every <p> in the document
print(result)
result = soup.select('.c1')        # every tag with class "c1"
print(result)

# select_one(css) -> the FIRST matching tag (or None)
div1 = soup.select_one('#box')     # the tag whose id is "box"
result = div1.select('p')          # <p> tags inside that tag only
print(result)

# .text -> the tag's text content
result = soup.select_one('#p2').text
print(result)
for p in soup.select('p'):
    print(p.text)

# .attrs -> dict of the tag's attributes
result = soup.select_one('#a1').attrs
print(result)
result = soup.select_one('#a1').attrs['href']
print(result)
作业
import requests
from bs4 import BeautifulSoup
from re import search
import csv
import time
def get_one_page(page):
    """Scrape one page (25 movies) of the Douban Top-250 list and append the
    rows to 'files/电影.csv'.

    :param page: 0-based page index; page N covers ranks N*25+1 .. (N+1)*25.
                 When page == 0 the CSV header row is written first.
    """
    headers = {
        # cookie: a logged-in Douban session, needed to get past the login wall
        'cookie': 'bid=g16urOELfcQ; douban-fav-remind=1; __gads=ID=3f57e16f48f82cf2-2222dd78d6d0003d:T=1646561716:RT=1646561716:S=ALNI_MZc-Jdw3ejofw4l88N8mdU7MdYYzg; ll="118318"; __utma=30149280.808328653.1646561718.1646561718.1646561718.1; __utmz=30149280.1646561718.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); ct=y; dbcl2="222590700:fEe+R6RmPAA"; ck=4rf3; push_noty_num=0; push_doumail_num=0',
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/99.0.4844.51 Safari/537.36'
    }
    url = f'https://movie.douban.com/top250?start={page * 25}&filter='
    response = requests.get(url, headers=headers)
    soup = BeautifulSoup(response.text, 'lxml')
    all_data = []
    for li in soup.select('.grid_view>li'):
        name = li.select_one('.title').text
        # info paragraph, roughly: "导演:X 主演:Y\n1994/国家/类型" after
        # stripping and removing non-breaking spaces
        result = li.select_one('.bd>p').text.strip().replace(' ', '')
        director = search(r'导演:(.+?)\s', result).group(1)
        # 主演 may be missing for long director names; fall back to None
        # (csv writes None as an empty cell, same as the original behavior)
        actor_match = search(r'主演:(.+?)\s', result)
        actor = actor_match.group(1) if actor_match else None
        # renamed from 'time' — the original name shadowed the time module
        release_year = search(r'\n(\d+).*?/', result).group(1)
        country = search(r'\n.+?/\s*(.+?)\s*/', result).group(1)
        # renamed from 'type' — the original name shadowed the builtin
        genre = search(r'/.+?/\s*(.+?)$', result).group(1)
        score = li.select_one('.rating_num').text
        # last <span> under .star holds e.g. "123456人评价"; keep digits only
        comment_num = search(r'\d+', li.select('.star>span')[-1].text).group()
        img_url = li.select_one('.pic img').attrs['src']
        all_data.append([name, director, actor, release_year, country, genre,
                         score, comment_num, img_url])
    # 'with' closes the CSV file (the original leaked the handle on every call)
    with open('files/电影.csv', 'a', encoding='utf-8', newline='') as f:
        writer = csv.writer(f)
        if page == 0:
            writer.writerow(['电影名称', '导演', '主演', '上映时间', '国家',
                             '类型', '评分', '评论人数', '封面地址'])
        writer.writerows(all_data)
    print(f'==========第{page+1}页获取成功===========')
# Entry point: crawl all 10 pages (250 movies), pausing one second between
# requests to avoid hammering the server.
if __name__ == '__main__':
    for page_index in range(10):
        get_one_page(page_index)
        time.sleep(1)