随着信息时代的到来,越来越多的企业利用大数据技术获取公开的行业信息,而大规模数据采集离不开爬虫技术。企业在使用爬虫抓取数据时,往往会被目标网站限制 IP。下面介绍企业在做数据抓取时,如何使用代理 IP 解决网站封 IP 的问题。
#coding:utf-8
import urllib2
def url_user_agent(url, proxy=None):
    """Fetch *url* through an HTTP proxy, pretending to be a browser.

    Args:
        url: the page to download.
        proxy: optional ``{'http': 'host:port'}`` mapping passed to
            ``urllib2.ProxyHandler``; defaults to the built-in proxy
            address when omitted (backward compatible with the old
            hard-coded value).

    Returns:
        The page body as a string, or ``None`` when the server
        redirected the request away from *url*.
    """
    if proxy is None:
        # Default crawler proxy (formerly hard-coded).
        proxy = {'http': '27.24.158.155:84'}
    proxy_support = urllib2.ProxyHandler(proxy)
    opener = urllib2.build_opener(proxy_support)
    # Install globally so every subsequent urllib2 call uses the proxy.
    urllib2.install_opener(opener)
    # Spoof a browser User-Agent so the site does not answer 403 Forbidden.
    i_headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/31.0.1650.48'}
    req = urllib2.Request(url, headers=i_headers)
    html = urllib2.urlopen(req)
    try:
        # A differing final URL means we were redirected (e.g. to an
        # anti-bot or login page); treat that as "no usable content".
        if url == html.geturl():
            return html.read()
        return None
    finally:
        # Always release the connection, even if read() raises.
        html.close()
url = 'http://www.dianping.com/search/category/2/10/g311'
doc = url_user_agent(url)
print doc