本篇内容主要讲解“如何用Python爬取当当、京东、亚马逊图书信息”,感兴趣的朋友不妨来看看。本文介绍的方法操作简单快捷,实用性强。下面就让小编来带大家学习“如何用Python爬取当当、京东、亚马逊图书信息”吧!

注:
1.本程序采用 MySQL 数据库存储,请运行程序前手动修改程序开头处的数据库连接信息;
2.需要 bs4、requests、pymysql 库支持;
3.支持多线程。

源码

from bs4 import BeautifulSoup
import re
import requests
import pymysql
import threading
import os
import traceback

# Module-level shared MySQL connection (database "book" must exist already).
# A failed connection only prints a notice; later cursor use will then fail.
try:
    conn = pymysql.connect(host='127.0.0.1', port=3306, user='root',
                           passwd='root', db='book', charset="utf8")
    cursor = conn.cursor()
except Exception:
    print('\n错误:数据库连接失败')


def getHTMLText(url):
    """Fetch *url* with a desktop-browser User-Agent and return its HTML text.

    Returns '' on any network/HTTP error instead of raising.
    """
    try:
        headers = {
            'User-Agent': ('Mozilla/5.0 (Windows NT 6.1; Win64; x64) '
                           'AppleWebKit/537.36 (KHTML, like Gecko) '
                           'Chrome/56.0.2924.87 Safari/537.36')
        }
        r = requests.get(url, headers=headers)
        r.raise_for_status()
        # Use the content-sniffed encoding so Chinese pages decode correctly.
        r.encoding = r.apparent_encoding
        return r.text
    except Exception:
        return ''


def getSoupObject(url):
    """Return a BeautifulSoup object for *url*, or '' on failure."""
    try:
        html = getHTMLText(url)
        return BeautifulSoup(html, 'html.parser')
    except Exception:
        return ''


def getPageLength(webSiteName, url):
    """Return the total number of result pages for a keyword search.

    webSiteName: 'DangDang' or 'Amazon' (JD uses a fixed page count).
    Returns the page count as a string scraped from the pager widget,
    or -1 (after printing an error) when it cannot be determined.
    """
    try:
        soup = getSoupObject(url)
        if webSiteName == 'DangDang':
            a = soup('a', {'name': 'bottom-page-turn'})
            return a[-1].string  # last pager link holds the page total
        elif webSiteName == 'Amazon':
            a = soup('span', {'class': 'pagnDisabled'})
            return a[-1].string
    except Exception:
        print('\n错误:获取{}总页数时出错...'.format(webSiteName))
        return -1


class DangDangThread(threading.Thread):
    """Scrape DangDang search results for a keyword into table db_<kw>_dangdang."""

    def __init__(self, keyword):
        threading.Thread.__init__(self)
        self.keyword = keyword

    def run(self):
        print('\n提示:开始爬取当当网数据...')
        count = 1
        # 总页数
        length = getPageLength(
            'DangDang', 'http://search.dangdang.com/?key={}'.format(self.keyword))
        tableName = 'db_{}_dangdang'.format(self.keyword)
        try:
            print('\n提示:正在创建DangDang表...')
            cursor.execute('create table {} (id int, title text, prNow text, '
                           'prPre text, link text)'.format(tableName))
            print('\n提示:开始爬取当当网页面...')
            for i in range(1, int(length)):
                url = 'http://search.dangdang.com/?key={}&page_index={}'.format(
                    self.keyword, i)
                soup = getSoupObject(url)
                lis = soup('li', {'class': re.compile(r'line'),
                                  'id': re.compile(r'p')})
                for li in lis:
                    a = li.find_all('a', {'name': 'itemlist-title',
                                          'dd_name': '单品标题'})
                    pn = li.find_all('span', {'class': 'search_now_price'})
                    pp = li.find_all('span', {'class': 'search_pre_price'})
                    if a:
                        link = a[0].attrs['href']
                        title = a[0].attrs['title'].strip()
                    else:
                        link = 'NULL'
                        title = 'NULL'
                    prNow = pn[0].string if pn else 'NULL'
                    prPre = pp[0].string if pp else 'NULL'
                    # Parameterized values: scraped titles may contain quotes,
                    # which would break (or inject into) a %-formatted query.
                    sql = ('insert into {} (id, title, prNow, prPre, link) '
                           'values (%s, %s, %s, %s, %s)'.format(tableName))
                    cursor.execute(sql, (count, title, prNow, prPre, link))
                    print('\r提示:正在存入当当数据,当前处理id:{}'.format(count), end='')
                    count += 1
                    conn.commit()
        except Exception:
            # Do not kill the other threads, but make the failure visible.
            traceback.print_exc()


class AmazonThread(threading.Thread):
    """Scrape Amazon.cn search results for a keyword into table db_<kw>_amazon."""

    def __init__(self, keyword):
        threading.Thread.__init__(self)
        self.keyword = keyword

    def run(self):
        print('\n提示:开始爬取亚马逊数据...')
        count = 1
        # 总页数
        length = getPageLength(
            'Amazon', 'https://www.amazon.cn/s/keywords={}'.format(self.keyword))
        tableName = 'db_{}_amazon'.format(self.keyword)
        try:
            print('\n提示:正在创建Amazon表...')
            cursor.execute('create table {} (id int, title text, prNow text, '
                           'link text)'.format(tableName))
            print('\n提示:开始爬取亚马逊页面...')
            for i in range(1, int(length)):
                url = 'https://www.amazon.cn/s/keywords={}&page={}'.format(
                    self.keyword, i)
                soup = getSoupObject(url)
                lis = soup('li', {'id': re.compile(r'result_')})
                for li in lis:
                    # NOTE(review): class strings reconstructed from a garbled
                    # source — presumably the space-separated Amazon classes;
                    # verify against the live page markup.
                    a = li.find_all(
                        'a', {'class': 'a-link-normal s-access-detail-page a-text-normal'})
                    pn = li.find_all(
                        'span', {'class': 'a-size-base a-color-price s-price a-text-bold'})
                    if a:
                        link = a[0].attrs['href']
                        title = a[0].attrs['title'].strip()
                    else:
                        link = 'NULL'
                        title = 'NULL'
                    prNow = pn[0].string if pn else 'NULL'
                    sql = ('insert into {} (id, title, prNow, link) '
                           'values (%s, %s, %s, %s)'.format(tableName))
                    cursor.execute(sql, (count, title, prNow, link))
                    print('\r提示:正在存入亚马逊数据,当前处理id:{}'.format(count), end='')
                    count += 1
                    conn.commit()
        except Exception:
            traceback.print_exc()


class JDThread(threading.Thread):
    """Scrape JD search results for a keyword into table db_<kw>_jd.

    JD has no reliable pager element here, so a fixed 99-page sweep is used.
    """

    def __init__(self, keyword):
        threading.Thread.__init__(self)
        self.keyword = keyword

    def run(self):
        print('\n提示:开始爬取京东数据...')
        count = 1
        tableName = 'db_{}_jd'.format(self.keyword)
        try:
            print('\n提示:正在创建JD表...')
            cursor.execute('create table {} (id int, title text, prNow text, '
                           'link text)'.format(tableName))
            print('\n提示:开始爬取京东页面...')
            for i in range(1, 100):
                url = 'https://search.jd.com/Search?keyword={}&page={}'.format(
                    self.keyword, i)
                soup = getSoupObject(url)
                lis = soup('li', {'class': 'gl-item'})
                for li in lis:
                    a = li.find_all('div', {'class': 'p-name'})
                    pn = li.find_all('div', {'class': 'p-price'})[0].find_all('i')
                    if a:
                        # JD hrefs are protocol-relative; prepend the scheme.
                        link = 'http:' + a[0].find_all('a')[0].attrs['href']
                        title = a[0].find_all('em')[0].get_text()
                    else:
                        link = 'NULL'
                        title = 'NULL'
                    if len(link) > 128:
                        link = 'TooLong'
                    prNow = '¥' + pn[0].string if pn else 'NULL'
                    sql = ('insert into {} (id, title, prNow, link) '
                           'values (%s, %s, %s, %s)'.format(tableName))
                    cursor.execute(sql, (count, title, prNow, link))
                    print('\r提示:正在存入京东网数据,当前处理id:{}'.format(count), end='')
                    count += 1
                    conn.commit()
        except Exception:
            traceback.print_exc()


def closeDB():
    """Release the shared DB handles: cursor first, then the connection."""
    global conn, cursor
    cursor.close()
    conn.close()


def main():
    """Ask for a keyword, scrape all three sites concurrently, then clean up."""
    print('提示:使用本程序,请手动创建空数据库:Book,并修改本程序开头的数据库连接语句')
    keyword = input("\n提示:请输入要爬取的关键字:")
    workers = [DangDangThread(keyword), AmazonThread(keyword), JDThread(keyword)]
    for t in workers:
        t.start()
    for t in workers:
        t.join()
    closeDB()
    print('\n爬取已经结束,即将关闭....')
    os.system('pause')  # Windows-only "press any key" pause


if __name__ == '__main__':
    main()

# 示例截图:

关键词:Android 下的部分运行结果(以导出至 Excel )

到此,相信大家对“如何用Python爬取当当、京东、亚马逊图书信息”有了更深的了解,不妨来实际操作一番吧!这里是亿速云网站,更多相关内容可以进入相关频道进行查询,关注我们,继续学习!