
[Python Crawler Code] Scraping JD.com Product Price Trends and Book Review Data

SAS中文论坛

Project 1

Get the product price from a JD.com product URL

Let's try scraping some JD.com data. The libraries needed are BeautifulSoup and urllib2; tested under Python 2.

JD serves its product detail pages in two stages: the HTML is returned first, and the price is then filled in by an Ajax request. There are two ways to get the price:
1. Run the page's JavaScript and parse the HTML it produces.
2. Simulate the Ajax request and read the price directly (see the sketch below; the full script follows it).
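A minimal sketch of approach 2, written in Python 2 to match the script that follows. The endpoint, the skuIds parameter, and the response shape (a JSON array whose first object carries the price string under 'p') are taken from that script rather than independently verified:

# -*- coding: utf-8 -*-
# Approach 2 sketch: skip the page entirely and call the price endpoint.
import json
import urllib

def fetch_price(skuid):
    # skuid is the numeric segment of a product URL,
    # e.g. '1044773' from http://item.jd.com/1044773.html
    url = 'http://p.3.cn/prices/mgets?skuIds=J_%s&type=1' % skuid
    data = json.load(urllib.urlopen(url))  # expect a JSON array of objects
    return data[0].get('p')               # 'p' holds the price as a string

if __name__ == '__main__':
    print fetch_price('1044773')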

# -*- coding: utf-8 -*-
"""
Get the product price from a JD.com URL.
JD's request flow: the HTML page is rendered first, then an Ajax GET
request fetches the corresponding product price.
"""
import urllib
import json
import re


class JdPrice(object):
    """
    A simple wrapper around fetching a JD product price.
    """
    def __init__(self, url):
        self.url = url
        self._response = urllib.urlopen(self.url)
        self.html = self._response.read()

    def get_product(self):
        """
        Extract the product description from the HTML (the data is not
        processed in detail; a raw str is returned).
        :return:
        """
        product_re = re.compile(r'compatible: true,(.*?)};', re.S)
        product_info = re.findall(product_re, self.html)[0]
        return product_info

    def get_product_skuid(self):
        """
        Extract the product's skuid from the product info.
        :return:
        """
        product_info = self.get_product()
        skuid_re = re.compile(r'skuid: (.*?),')
        skuid = re.findall(skuid_re, product_info)[0]
        return skuid

    def get_product_name(self):
        pass

    def get_product_price(self):
        """
        Request the product price using its skuid.
        :return:
        """
        price = None
        skuid = self.get_product_skuid()
        url = 'http://p.3.cn/prices/mgets?skuIds=J_' + skuid + '&type=1'
        # The endpoint returns a JSON array; the first object's 'p' field
        # holds the price string.
        price_json = json.load(urllib.urlopen(url))[0]
        if price_json['p']:
            price = price_json['p']
        return price


# test code
if __name__ == '__main__':
    url = 'http://item.jd.com/1310118868.html'
    url = 'http://item.jd.com/1044773.html'  # overrides the line above
    jp = JdPrice(url)
    print jp.get_product_price()

# htm.decode('gb2312', 'ignore').encode('utf-8')
# f = open('jjs.html', 'w')
# f.write(htm)
# f.close()




Here is another JD price crawler to share, this one built on the creepy crawling library:



from creepy import Crawler
from BeautifulSoup import BeautifulSoup
import urllib2
import json

class MyCrawler(Crawler):
    def process_document(self, doc):
        if doc.status == 200:
            print '[%d] %s' % (doc.status, doc.url)
            try:
                soup = BeautifulSoup(doc.text.decode('gb18030').encode('utf-8'))
            except Exception as e:
                print e
                soup = BeautifulSoup(doc.text)
            print soup.find(id="product-intro").div.h1.text
            url_id = urllib2.unquote(doc.url).decode('utf8').split('/')[-1].split('.')[0]
            f = urllib2.urlopen('http://p.3.cn/prices/get?skuid=J_' + url_id, timeout=5)
            price = json.loads(f.read())
            f.close()
            print price[0]['p']
        else:
            pass

crawler = MyCrawler()
crawler.set_follow_mode(Crawler.F_SAME_HOST)
crawler.set_concurrency_level(16)
crawler.add_url_filter('\.(jpg|jpeg|gif|png|js|css|swf)$')
crawler.crawl('http://item.jd.com/982040.html')
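Both scripts read the price out of the endpoint's reply as price[0]['p'], so the endpoint evidently returns a JSON array of objects with the price string under 'p'. A representative reply, with hypothetical values, would look like:

[{"id": "J_982040", "p": "2299.00"}]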


Project 2

Scrape JD.com book review data


JD book reviews are rich in information, including purchase date, book title, author, and counts of positive, neutral and negative reviews. Taking purchase date as the example, the implementation uses Python + MySQL and comes to only about 100 lines. The relevant explanations are annotated inline in the program:
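The script assumes two MySQL tables already exist: booknew (the list of books and review page counts to crawl) and ljj (where the purchase dates land). Here is a hypothetical schema inferred from the script's SELECT and INSERT statements; the table and column names match the code, but the column types are assumptions:

# -*- coding: utf-8 -*-
# Hypothetical schema setup (Python 2 + MySQLdb), inferred from the queries
# in the script below; types are guesses, not taken from the original.
import MySQLdb

conn = MySQLdb.connect(host='localhost', user='root', passwd='', db='jd')
cursor = conn.cursor()
cursor.execute("""CREATE TABLE IF NOT EXISTS booknew (
    bookid VARCHAR(20),   -- JD product id, e.g. '10178500'
    cateid VARCHAR(10),   -- category id, a key of the cate dict
    pagenumber INT        -- number of review pages for this book
)""")
cursor.execute("""CREATE TABLE IF NOT EXISTS ljj (
    id INT AUTO_INCREMENT PRIMARY KEY,
    cateid VARCHAR(10),
    bookid VARCHAR(20),
    date VARCHAR(30)      -- purchase date text scraped from the review page
)""")
conn.commit()
cursor.close()
conn.close()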


from selenium import webdriver
from bs4 import BeautifulSoup
import re
import sys  # needed for sys.exit() below; missing from the original imports
import win32com.client
import threading, time
import MySQLdb

def mydebug():
    driver.quit()
    exit(0)

def catchDate(s):
    """Extract data from the page"""
    soup = BeautifulSoup(s)
    z = []
    global nowtimes

    m = soup.findAll("div", class_="date-buy")
    for obj in m:
        try:
            tmp = obj.find('br').contents
        except Exception, e:
            continue
        if(tmp != ""):
            z.append(tmp)
            nowtimes += 1
    return z

def getTimes(n, t):
    """Report current progress"""
    return "Progress: " + str(int(100 * n / t)) + "%"


#———————————————————————————————————| program start |—————————————————————————————————
# Book categories (JD category id -> name)
cate = {"3273":"历史","3279":"心理学","3276":"政治军事","3275":"国学古籍","3274":"哲学宗教","3277":"法律","3280":"文化","3281":"社会科学"}

# Resume from a checkpoint
num1 = input("bookid:")
num2 = input("pagenumber:")

# Generate category links; 17355 * 20 = 347100 requests in total
totaltimes = 347100.0
nowtimes = 0

# Start a webdriver object (PhantomJS, IE or Chrome)
#driver = webdriver.PhantomJS()
driver = webdriver.Ie('C:\\Python27\\Scripts\\IEDriverServer')
#driver = webdriver.Chrome('C:\\Python27\\Scripts\\chromedriver')

# Read the review pages to scrape out of MySQL
# connect to the database
try:
    conn = MySQLdb.connect(host='localhost', user='root', passwd='', db='jd')
except Exception, e:
    print e
    sys.exit()

# get a cursor object
cursor = conn.cursor()
sql = "SELECT * FROM booknew ORDER BY pagenumber DESC"
cursor.execute(sql)
alldata = cursor.fetchall()

flag = 0
flag2 = 0

# Loop over the returned rows, e.g. http://club.jd.com/review/10178500-1-154.html
if alldata:
    for rec in alldata:
        # rec[0]--bookid, rec[1]--cateid, rec[2]--pagenumber
        if(rec[0] != str(num1) and flag == 0):
            continue
        else:
            flag = 1
        for p in range(num2, rec[2]):
            if(flag2 == 0):
                num2 = 0
                flag2 = 1
            p += 1
            link = "http://club.jd.com/review/" + rec[0] + "-1-" + str(p) + ".html"
            # fetch the page
            driver.get(link)
            html = driver.page_source
            # extract the reviews
            buydate = catchDate(html)
            # write to the database
            for z in buydate:
                sql = "INSERT INTO ljj (id, cateid, bookid, date) VALUES (NULL, '" + rec[0] + "','" + rec[1] + "','" + z[0] + "');"
                try:
                    cursor.execute(sql)
                except Exception, e:
                    print e
            conn.commit()
            print getTimes(nowtimes, totaltimes)

driver.quit()
cursor.close()
conn.close()



Project 3

Get product prices from JD product URLs (this script gathers the product URLs from the category list pages; the price step, analyzeContent, is left as a stub)


#-*- coding: UTF-8 -*-
'''
Created on 2013-12-5

@author: good-temper
'''
import urllib2
import bs4
import time

def getPage(urlStr):
    '''
    Fetch page content
    '''
    content = urllib2.urlopen(urlStr).read()
    return content

def getNextPageUrl(currPageNum):
    # List-page URL pattern, page number in the middle:
    # http://list.jd.com/9987-653-655-0-0-0-0-0-0-0-1-1-<page>-1-1-72-4137-33.html
    url = u'http://list.jd.com/9987-653-655-0-0-0-0-0-0-0-1-1-' + str(currPageNum+1) + '-1-1-72-4137-33.html'
    # Is there a next page? The "next" link is disabled on the last page.
    content = getPage(url)
    soup = bs4.BeautifulSoup(content)
    list = soup.findAll('span', {'class': 'next-disabled'})
    if(len(list) == 0):
        return url
    return ''

def analyzeList():
    pageNum = 0
    list = []
    url = getNextPageUrl(pageNum)
    while url != '':
        soup = bs4.BeautifulSoup(getPage(url))
        pagelist = soup.findAll('div', {'class': 'p-name'})
        for elem in pagelist:
            soup1 = bs4.BeautifulSoup(str(elem))
            list.append(soup1.find('a')['href'])
        pageNum = pageNum + 1
        print pageNum
        url = getNextPageUrl(pageNum)
    return list

def analyzeContent(url):
    # Price extraction is left unimplemented in the original.
    return ''

def writeToFile(list, path):
    f = open(path, 'a')
    for elem in list:
        f.write(elem + '\n')
    f.close()

if __name__ == '__main__':
    list = analyzeList()
    print 'Scraped ' + str(len(list)) + ' items'
    writeToFile(list, u'E:\\jd_phone_list.dat')
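analyzeContent() is left as a stub above. A minimal sketch of one way to fill it in, reusing the skuid-style price endpoint from Project 1 (same assumptions as there: the URL's numeric segment is the skuid, and the reply is a JSON array with the price under 'p'):

import json
import urllib2

def analyzeContent(url):
    # e.g. 'http://item.jd.com/1044773.html' -> skuid '1044773'
    skuid = url.split('/')[-1].split('.')[0]
    f = urllib2.urlopen('http://p.3.cn/prices/mgets?skuIds=J_' + skuid + '&type=1', timeout=5)
    data = json.loads(f.read())
    f.close()
    return data[0].get('p', '')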



Project 4

Scrape Douban movie links and reviews



import urllib.request
import re
import time

def movie(url):
    # NOTE: the original signature was movie(movieTag), but the body read the
    # global url; the parameter is renamed so the function uses its argument.
    tagUrl = urllib.request.urlopen(url)
    tagUrl_read = tagUrl.read().decode('utf-8')
    return tagUrl_read

def subject(tagUrl_read):
    '''
    Known issues:
    1. Sorting is applied to a single page only, not across all pages.
    2. Next update: add movie links, consider adding movie posters.
    3. The list needs to be appended to.
    4. Export to a local txt or excel file.
    5. When matching movie names, consider capturing link, name, rating
       and review count together as one tuple.
    '''
    # Regexes to match the movie name (with link), rating and review count
    nameURL = re.findall(r'(http://movie.douban.com/subject/[0-9.]+)/"\s+title="(.+)"', tagUrl_read)
    scoreURL = re.findall(r'<span\s+class="rating_nums">([0-9.]+)</span>', tagUrl_read)
    evaluateURL = re.findall(r'<span\s+class="pl">\((\w+)人评价\)</span>', tagUrl_read)
    movieLists = list(zip(nameURL, scoreURL, evaluateURL))
    newlist.extend(movieLists)
    return newlist

# quote() handles special (Chinese) characters in the URL
movie_type = urllib.request.quote(input('Enter a movie genre (e.g. 剧情, 喜剧, 悬疑): '))
page_end = int(input('Enter the last page number to search: '))
num_end = page_end * 20
num = 0
page_num = 1
newlist = []

while num < num_end:
    url = r'http://movie.douban.com/tag/%s?start=%d' % (movie_type, num)
    movie_url = movie(url)
    subject_url = subject(movie_url)
    num = page_num * 20
    page_num += 1
else:
    # The else clause of a while loop runs once the condition goes false.
    # sorted() with reverse=True sorts in descending order (the original
    # comment had this backwards); key=lambda picks the rating (index 1)
    # as the sort key -- see the short example after this script.
    movieLIST = sorted(newlist, key=lambda movieList: movieList[1], reverse=True)
    for movie in movieLIST:  # note: the loop variable shadows movie() above
        print(movie)

time.sleep(3)
print('Done')
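Since the original comment was unsure about key=lambda: key maps each element to the value that sorted() actually compares, and reverse=True gives descending order. A tiny standalone example with made-up ratings:

pairs = [('a', '7.9'), ('b', '9.1'), ('c', '8.3')]
# key picks field 1 of each tuple as the sort key; reverse=True -> descending
print(sorted(pairs, key=lambda p: p[1], reverse=True))
# -> [('b', '9.1'), ('c', '8.3'), ('a', '7.9')]
# Caveat: the ratings are strings, so the comparison is lexicographic; it
# works for equal-width values like '8.3' vs '9.1' but would rank '10.0'
# below '9.0'.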


