python简单爬虫笔记
python模拟浏览器爬取相关页面
import urllib.request

# Fetch a single blog page while presenting a desktop-Chrome User-Agent,
# then save the raw response bytes to a local HTML file.
url = "https://blog.51cto.com/itstyle/2146899"
# Spoof a real browser: some servers reject urllib's default User-Agent.
headers = ("User-Agent", "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36")
opener = urllib.request.build_opener()
opener.addheaders = [headers]
data = opener.open(url).read()
# "wb" because `data` is undecoded bytes; the context manager guarantees
# the file is closed even if the write raises (original used open/close
# with no protection against exceptions in between).
with open("D:/5.html", "wb") as fh:
    fh.write(data)
python爬取新闻网站并将文章下载到本地
import urllib.request
import urllib.error
import re

# Crawl the Sina news landing page, extract every same-site article link,
# and download each article to a numbered local HTML file.
data = urllib.request.urlopen("http://news.sina.com.cn/").read()
# "ignore" drops byte sequences that are not valid UTF-8 rather than raising.
data2 = data.decode("utf-8", "ignore")
pat = 'href="(http://news.sina.com.cn/.*?)">'
allurl = re.compile(pat).findall(data2)
# enumerate() replaces the range(len(...)) index loop; `path` avoids the
# original's `file` name, which shadows a well-known identifier.
for i, thisurl in enumerate(allurl):
    try:
        print("第" + str(i + 1) + "次爬取")
        # NOTE(review): assumes D:/pac/sinanews/ already exists — confirm,
        # urlretrieve does not create directories.
        path = "D:/pac/sinanews/" + str(i) + ".html"
        urllib.request.urlretrieve(thisurl, path)
        print("-----成功-----")
    except urllib.error.URLError as e:
        # HTTPError carries .code; plain URLError only .reason — print
        # whichever attributes exist.
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
python爬取月光博客文章下载到本地
import re
import urllib.request
import urllib.error

# Crawl the Williamlong blog front page, pair each article title with its
# URL, and download every article to a local file named after its title.
data = urllib.request.urlopen("http://www.williamlong.info/").read()
pat = 'rel="bookmark">(.*?)</a>'
pat_url = 'class="post-title"><a href="(http://www.williamlong.info/archives/.*?)"'
data = data.decode("utf-8")
mydata = re.compile(pat).findall(data)      # all article titles
allurl = re.compile(pat_url).findall(data)  # all article URLs
# zip() pairs titles with URLs and stops at the shorter list, so a count
# mismatch between the two regexes can no longer raise IndexError (the
# original indexed mydata[i] while looping over len(allurl)).
for i, (title, thisurl) in enumerate(zip(mydata, allurl)):
    try:
        print("正在生产第" + str(i + 1) + "次文件")
        # NOTE(review): `title` is raw scraped text — characters illegal in
        # Windows file names (e.g. ?, :, /) would make this path invalid;
        # verify against actual titles.
        path = "E:/PAS/yueguang/" + title + ".html"
        urllib.request.urlretrieve(thisurl, path)
        print("生产成功")
    except urllib.error.URLError as e:
        # HTTPError carries .code; plain URLError only .reason.
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
声明:本站所有文章资源内容,如无特殊说明或标注,均为采集网络资源。如若本站内容侵犯了原著者的合法权益,可联系本站删除。