In this post I'd like to share a few ways to parse HTML in Python 3. I hope you get something useful out of it; let's dig in!

Parsing HTML is an important data-processing step that follows crawling. Below are notes on several ways to parse HTML.

First, a basic helper function that fetches the HTML and prints the parsed result. The parser is passed in as an argument, so the approaches below are easy to swap.

# The parser function is passed in as an argument, so it is easy to swap in the
# different parsers defined below.
import gzip
import io
import urllib.request


def get_html(url, paraser=bs4_paraser):
    headers = {
        'Accept': '*/*',
        'Accept-Encoding': 'gzip,deflate,sdch',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Host': 'www.360kan.com',
        'Proxy-Connection': 'keep-alive',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 '
                      '(KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36',
    }
    request = urllib.request.Request(url, headers=headers)
    response = urllib.request.urlopen(request)
    if response.getcode() == 200:
        # The server answers with gzip-compressed content (see Accept-Encoding),
        # so decompress before handing the page to the parser.
        data = io.BytesIO(response.read())
        gzipper = gzip.GzipFile(fileobj=data)
        page = gzipper.read().decode('utf-8')
        value = paraser(page)  # or: paraser(open('E:/h6/haPkY0osd0r5UB.html').read())
        return value
    return None


value = get_html('http://www.360kan.com/m/haPkY0osd0r5UB.html', paraser=lxml_parser)
for row in value:
    print(row)

1. Parsing with lxml.html.

The lxml XML toolkit is a Pythonic binding for the C libraries libxml2 and libxslt. It is unique in that it combines the speed and XML feature completeness of these libraries with the simplicity of a native Python API, mostly compatible but superior to the well-known ElementTree API. The latest release works with all CPython versions from 2.6 to 3.5. See the introduction for more information about background and goals of the lxml project. Some common questions are answered in the FAQ. (From the [official site](http://lxml.de/).)
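Before the site-specific parser below, here is a minimal, self-contained sketch of the two lxml calls it relies on, `etree.HTML()` and `xpath()`. The HTML snippet and its class names are made up for illustration and are not from the 360kan page.

```python
from lxml import etree

# A tiny, made-up snippet just to illustrate etree.HTML() + xpath().
snippet = '<div class="item"><a href="/movie/1">Great film</a><span class="time">2016-08-01</span></div>'

doc = etree.HTML(snippet)                        # parse the string into an element tree
for item in doc.xpath('//div[@class="item"]'):   # select nodes with XPath
    title = item.xpath('./a/text()')[0]          # text content of the <a>
    href = item.xpath('./a/@href')[0]            # attribute value
    time = item.xpath('./span[@class="time"]/text()')[0]
    print(title, href, time)
```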

import re

from lxml import etree


def lxml_parser(page):
    data = []
    doc = etree.HTML(page)
    all_div = doc.xpath('//div[@class="yingping-list-wrap"]')
    for row in all_div:
        # Each review is one "item" div.
        all_div_item = row.xpath('.//div[@class="item"]')  # find_all('div', attrs={'class': 'item'})
        for r in all_div_item:
            value = {}
            # The title block of the review.
            title = r.xpath('.//div[@class="g-clear title-wrap"][1]')
            value['title'] = title[0].xpath('./a/text()')[0]
            value['title_href'] = title[0].xpath('./a/@href')[0]
            score_text = title[0].xpath('./div/span/span/@style')[0]
            score_text = re.search(r'\d+', score_text).group()
            value['score'] = int(score_text) / 20
            # Time of the review.
            value['time'] = title[0].xpath('./div/span[@class="time"]/text()')[0]
            # How many people liked it.
            value['people'] = int(
                re.search(r'\d+', title[0].xpath('./div[@class="num"]/span/text()')[0]).group())
            data.append(value)
    return data

2. Using BeautifulSoup. There is plenty of documentation for it online, so I will not go into detail here.
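Since the post skips the BeautifulSoup basics, here is a minimal sketch of the calls the full parser below depends on: `find_all()`, attribute access on a tag, and `.string`. The snippet is made up for illustration.

```python
from bs4 import BeautifulSoup

# A made-up snippet to illustrate find_all(), tag attribute access and .string.
snippet = '<div class="item"><a href="/movie/1">Great film</a><span class="time">2016-08-01</span></div>'

soup = BeautifulSoup(snippet, 'html.parser')
for item in soup.find_all('div', attrs={'class': 'item'}):
    print(item.a.string)   # text inside the first <a> descendant
    print(item.a['href'])  # attribute value
    print(item.find_all('span', attrs={'class': 'time'})[0].string)
```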

import re

from bs4 import BeautifulSoup


def bs4_paraser(html):
    all_value = []
    value = {}
    soup = BeautifulSoup(html, 'html.parser')
    # The block that contains the reviews.
    all_div = soup.find_all('div', attrs={'class': 'yingping-list-wrap'}, limit=1)
    for row in all_div:
        # Each review is one "item" div.
        all_div_item = row.find_all('div', attrs={'class': 'item'})
        for r in all_div_item:
            # The title block of the review.
            title = r.find_all('div', attrs={'class': 'g-clear title-wrap'}, limit=1)
            if title is not None and len(title) > 0:
                value['title'] = title[0].a.string
                value['title_href'] = title[0].a['href']
                score_text = title[0].div.span.span['style']
                score_text = re.search(r'\d+', score_text).group()
                value['score'] = int(score_text) / 20
                # Time of the review.
                value['time'] = title[0].div.find_all('span', attrs={'class': 'time'})[0].string
                # How many people liked it.
                value['people'] = int(
                    re.search(r'\d+', title[0].find_all('div', attrs={'class': 'num'})[0].span.string).group())
                # print(r)
                all_value.append(value)
                value = {}
    return all_value

3. Using SGMLParser. This approach works through start/end tag callbacks, which makes the parsing flow explicit, but it is also rather tedious, and this particular scenario is not a great fit for it. Note that the sgmllib module that provides SGMLParser was removed in Python 3, so the example below only runs on Python 2.

import re

from sgmllib import SGMLParser  # Python 2 only: sgmllib was removed in Python 3


class CommentParaser(SGMLParser):
    def __init__(self):
        SGMLParser.__init__(self)
        # Flags that track which divs we are currently inside.
        self.__start_div_yingping = False
        self.__start_div_item = False
        self.__start_div_gclear = False
        self.__start_div_ratingwrap = False
        self.__start_div_num = False
        # <a> tag state.
        self.__start_a = False
        # <span> state machine.
        self.__span_state = 0
        # Collected data.
        self.__value = {}
        self.data = []

    def start_div(self, attrs):
        for k, v in attrs:
            if k == 'class' and v == 'yingping-list-wrap':
                self.__start_div_yingping = True
            elif k == 'class' and v == 'item':
                self.__start_div_item = True
            elif k == 'class' and v == 'g-clear title-wrap':
                self.__start_div_gclear = True
            elif k == 'class' and v == 'rating-wrap g-clear':
                self.__start_div_ratingwrap = True
            elif k == 'class' and v == 'num':
                self.__start_div_num = True

    def end_div(self):
        if self.__start_div_yingping:
            if self.__start_div_item:
                if self.__start_div_gclear:
                    if self.__start_div_num or self.__start_div_ratingwrap:
                        if self.__start_div_num:
                            self.__start_div_num = False
                        if self.__start_div_ratingwrap:
                            self.__start_div_ratingwrap = False
                    else:
                        self.__start_div_gclear = False
                else:
                    self.data.append(self.__value)
                    self.__value = {}
                    self.__start_div_item = False
            else:
                self.__start_div_yingping = False

    def start_a(self, attrs):
        if self.__start_div_yingping and self.__start_div_item and self.__start_div_gclear:
            self.__start_a = True
            for k, v in attrs:
                if k == 'href':
                    self.__value['href'] = v

    def end_a(self):
        if self.__start_div_yingping and self.__start_div_item and self.__start_div_gclear and self.__start_a:
            self.__start_a = False

    def start_span(self, attrs):
        if self.__start_div_yingping and self.__start_div_item and self.__start_div_gclear:
            if self.__start_div_ratingwrap:
                if self.__span_state != 1:
                    for k, v in attrs:
                        if k == 'class' and v == 'rating':
                            self.__span_state = 1
                        elif k == 'class' and v == 'time':
                            self.__span_state = 2
                else:
                    for k, v in attrs:
                        if k == 'style':
                            score_text = re.search(r'\d+', v).group()
                            self.__value['score'] = int(score_text) / 20
                            self.__span_state = 3
            elif self.__start_div_num:
                self.__span_state = 4

    def end_span(self):
        self.__span_state = 0

    def handle_data(self, data):
        if self.__start_a:
            self.__value['title'] = data
        elif self.__span_state == 2:
            self.__value['time'] = data
        elif self.__span_state == 4:
            score_text = re.search(r'\d+', data).group()
            self.__value['people'] = int(score_text)


def sgl_parser(html):
    parser = CommentParaser()
    parser.feed(html)
    return parser.data

4. Using HTMLParser. The principle is the same as in method 3; only the callback names differ, so most of the code can be shared. Unlike SGMLParser, html.parser.HTMLParser is part of the Python 3 standard library.
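As a warm-up for the full parser below, here is a minimal sketch of the html.parser.HTMLParser callback model (`handle_starttag`, `handle_data`, `handle_endtag`); the snippet it parses and the field it extracts are made up for illustration.

```python
from html.parser import HTMLParser


class LinkTextParser(HTMLParser):
    """Collects the text of every <a> tag it sees."""

    def __init__(self):
        super().__init__()
        self.in_a = False
        self.links = []

    def handle_starttag(self, tag, attrs):
        if tag == 'a':
            self.in_a = True

    def handle_data(self, data):
        if self.in_a:
            self.links.append(data)

    def handle_endtag(self, tag):
        if tag == 'a':
            self.in_a = False


parser = LinkTextParser()
parser.feed('<p><a href="/movie/1">Great film</a> and <a href="/movie/2">Another one</a></p>')
print(parser.links)  # ['Great film', 'Another one']
```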

import re
from html.parser import HTMLParser  # Python 3; in Python 2 this lived in the HTMLParser module


class CommentHTMLParser(HTMLParser):
    def __init__(self):
        HTMLParser.__init__(self)
        # Flags that track which divs we are currently inside.
        self.__start_div_yingping = False
        self.__start_div_item = False
        self.__start_div_gclear = False
        self.__start_div_ratingwrap = False
        self.__start_div_num = False
        # <a> tag state.
        self.__start_a = False
        # <span> state machine.
        self.__span_state = 0
        # Collected data.
        self.__value = {}
        self.data = []

    def handle_starttag(self, tag, attrs):
        if tag == 'div':
            for k, v in attrs:
                if k == 'class' and v == 'yingping-list-wrap':
                    self.__start_div_yingping = True
                elif k == 'class' and v == 'item':
                    self.__start_div_item = True
                elif k == 'class' and v == 'g-clear title-wrap':
                    self.__start_div_gclear = True
                elif k == 'class' and v == 'rating-wrap g-clear':
                    self.__start_div_ratingwrap = True
                elif k == 'class' and v == 'num':
                    self.__start_div_num = True
        elif tag == 'a':
            if self.__start_div_yingping and self.__start_div_item and self.__start_div_gclear:
                self.__start_a = True
                for k, v in attrs:
                    if k == 'href':
                        self.__value['href'] = v
        elif tag == 'span':
            if self.__start_div_yingping and self.__start_div_item and self.__start_div_gclear:
                if self.__start_div_ratingwrap:
                    if self.__span_state != 1:
                        for k, v in attrs:
                            if k == 'class' and v == 'rating':
                                self.__span_state = 1
                            elif k == 'class' and v == 'time':
                                self.__span_state = 2
                    else:
                        for k, v in attrs:
                            if k == 'style':
                                score_text = re.search(r'\d+', v).group()
                                self.__value['score'] = int(score_text) / 20
                                self.__span_state = 3
                elif self.__start_div_num:
                    self.__span_state = 4

    def handle_endtag(self, tag):
        if tag == 'div':
            if self.__start_div_yingping:
                if self.__start_div_item:
                    if self.__start_div_gclear:
                        if self.__start_div_num or self.__start_div_ratingwrap:
                            if self.__start_div_num:
                                self.__start_div_num = False
                            if self.__start_div_ratingwrap:
                                self.__start_div_ratingwrap = False
                        else:
                            self.__start_div_gclear = False
                    else:
                        self.data.append(self.__value)
                        self.__value = {}
                        self.__start_div_item = False
                else:
                    self.__start_div_yingping = False
        elif tag == 'a':
            if self.__start_div_yingping and self.__start_div_item and self.__start_div_gclear and self.__start_a:
                self.__start_a = False
        elif tag == 'span':
            self.__span_state = 0

    def handle_data(self, data):
        if self.__start_a:
            self.__value['title'] = data
        elif self.__span_state == 2:
            self.__value['time'] = data
        elif self.__span_state == 4:
            score_text = re.search(r'\d+', data).group()
            self.__value['people'] = int(score_text)


def html_parser(html):
    parser = CommentHTMLParser()
    parser.feed(html)
    return parser.data

Having read this article, you should now have a good idea of how to parse HTML in Python 3. To learn more, follow the 亿速云 industry news channel. Thanks for reading!