code stringlengths 1 1.49M | vector listlengths 0 7.38k | snippet listlengths 0 7.38k |
|---|---|---|
#/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-7-29
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from j360buy.image_price import captcha_360buy
from pageparser import *
from threadpool import ThreadPool, WorkRequest
import json
import os
import re
import string
import threading
import urllib
from spiderconfigparser import SpiderConfig
from crawlerhttp import crawleRetries
j360buyRoot = ObuyUrlSummary(url=r'http://www.360buy.com/allSort.aspx', name='360buy',
isRecursed=True, catagoryLevel=0)
def translator(frm='', to='', delete='', keep=None):
if len(to) == 1:
to = to * len(frm)
trans = string.maketrans(frm, to)
if keep is not None:
allchars = string.maketrans('', '')
delete = allchars.translate(allchars, keep.translate(allchars, delete))
def translate(s):
return s.translate(trans, delete)
return translate
digits_only = translator(keep=string.digits)
class J360buyAllSortParser(RootCatagoryPageParser):
'''
从http://www.360buy.com/allSort.aspx获取所有的分类信息,
组合成ObuyUrlSummary
'''
mainHost = r'http://www.360buy.com'
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(J360buyAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def getBaseSort3UrlSums(self):
finalUrlList = []
allSort = self.soup.find(name='div', attrs={'id':'allsort'})
for t in allSort.findAll(name='div', attrs={'id':re.compile('JDS_[0-9]+')}):#一级分类
sort_1 = t.find(name='div', attrs={'class':'mt'})
name, url = ParserUtils.parserTag_A(sort_1.h2.a)
sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=False)
sort_2 = t.find(name='div', attrs={'class':'mc'})
for tt in sort_2(name='dl'):#二级分类
name, url = ParserUtils.parserTag_A(tt.dt.a)
url = ''.join((self.mainHost, url))
sort_2_urlsum = self.buildSort_N(url, name, sort_1_urlsum, isCrawle=False)
for ttt in tt.dd(name='em'):#三级分类
name, url = ParserUtils.parserTag_A(ttt.a)
url = ''.join((self.mainHost, '/', url))
sort_3_urlsum = self.buildSort_N(url, name, sort_2_urlsum,firstFinalPage=True)
finalUrlList.append(sort_3_urlsum)
return finalUrlList
class J360buySort3PageParser(Sort3PageParser):
'''
360Buy三级页面解析类
'''
pricePageNum = 4
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(J360buySort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def nextPageUrlPattern(self):
urlSegs = self.rootUrlSummary.url.rsplit('.', 1)
pageSeg = '-0-0-0-0-0-0-0-1-1-{}'
return '%s%s.%s' % (urlSegs[0], pageSeg, urlSegs[1])
def getTotal(self):
pageSeg = self.soup.find(name='div', attrs={'id':'filter'}).find(attrs={'class':'pagin pagin-m fr'})
totalPage = int(pageSeg.span.string.split('/')[-1])
if totalPage > SpiderConfig.getMaxPage():
totalPage = SpiderConfig.getMaxPage()
return totalPage
def __getAdWords(self, plist):
adQueryDict = eval(re.compile(r'{.*}').search(str(plist.script)).group())
baseUrl = 'http://www.360buy.com/JdService.aspx?callback=GetJdwsmentsCallback&action=GetJdwsment'
url = '&'.join((baseUrl, urllib.urlencode(adQueryDict)))
result = crawleRetries(url)
ct = re.compile(r'{.*}').search(result.content)
if ct is None:
return []
jObj = json.loads(ct.group())
return jObj['html']
def parserPageInfos(self):
resultList = []
plist = self.soup.find(name='div', attrs={'id':'plist'})
if plist is None:
raise Exception("Page Error")
return resultList
try:
pool = ThreadPool(self.pricePageNum)
pid_ad = dict([[int(wa['Wid']), wa['AdTitle']] for wa in self.__getAdWords(plist)])
for li in plist(name='li', attrs={'sku':re.compile('[0-9]+')}):
if li['sku'].startswith('100'): #过滤非京东自营商品
continue
pid = int(li['sku'])
pName,url = ParserUtils.parserTag_A(li.find(name='div', attrs={'class':'p-name'}).a)
priceImgUrl = li.find(name='div', attrs={'class':'p-price'}).img['src']
adWords = pid_ad.get(pid, '')
imgUrlSeg = li.find(name='div',attrs={'class':'p-img'}).find(name='img')
imgUrl = ''
if imgUrlSeg:
try:
imgUrl = imgUrlSeg['src']
except Exception:
imgUrl = imgUrlSeg['src2']
evaluateNumSeg = li.find(name='span',attrs={'class':'evaluate'})
reputationSeg = li.find(name='span',attrs={'class':'reputation'}) #好评度
reputation = ParserUtils.getDigit(reputationSeg.getText())
evaluateNum = ParserUtils.getDigit(evaluateNumSeg.getText())
prodDetail = ProductDetails(productId=pid, name=pName, adWords=adWords,imageUrl=imgUrl,
reputation=reputation,evaluateNum=evaluateNum,fullUrl=url)
prodDetail.catagory = self.rootUrlSummary
pimgUrlSumm = ObuyUrlSummary(url = priceImgUrl)
req = WorkRequest(getProductPrice, [pimgUrlSumm, prodDetail, resultList, pool,captcha_360buy], None,
callback=None)
pool.putRequest(req)
pool.wait()
except Exception,e:
raise e
finally:
pool.dismissWorkers(num_workers=self.pricePageNum)
return resultList
class J360buySort4PageParser(J360buySort3PageParser):
'''
分类四级页面为列表页面,只抽取Product信息
'''
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(J360buySort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def parserSubUrlSums(self):
pass
parserDict = {0:J360buyAllSortParser, 3:J360buySort3PageParser, 4:J360buySort4PageParser}
import hashlib
def getMd5Key(src):
m2 = hashlib.md5()
m2.update(src)
dest2 = int(m2.hexdigest(), 16)
return dest2
''' test '''
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')
def test360BuyAllSortPage():
fileName = os.path.join(testFilePath, 'allSort.aspx')
with open(fileName, 'r') as fInput:
content = fInput.read()
rootUrlSum = ObuyUrlSummary(url=r'http://www.360buy.com/allSort.aspx', name='360buy')
firstPage = J360buyAllSortParser(content, rootUrlSum)
s0 = set()
s1 = set()
s2 = set()
for sort_3 in firstPage.getBaseSort3UrlSums():
parentPath = sort_3.parentPath
s0.add(parentPath[1].url)
s1.add(parentPath[2].url)
s2.add(sort_3.url)
sa = set()
import itertools
for t in itertools.chain(s0,s1,s2):
sa.add(str(getMd5Key(t))[0:16])
print len(sa)
print len(s0),len(s1),len(s2)
def testSort3Page():
fileName = os.path.join(testFilePath, '360buy_2011-08-15_12-26-01.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
sort_3_urlsum = ObuyUrlSummary(url=r'http://www.360buy.com/products/737-794-870.html', parentPath=[('test')], catagoryLevel=3)
sort3Page = J360buySort3PageParser(content, sort_3_urlsum)
for sort_4 in sort3Page.getSort4PageUrlSums():
print sort_4
def testSort3Details():
fileName = os.path.join(testFilePath, '360buy_2011-08-15_12-26-01.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
sort_3_urlsum = ObuyUrlSummary(url=r'http://www.360buy.com/products/737-794-798-0-0-0-0-0-0-0-1-1-1-1-72-33.html', parentPath=[('test')], catagoryLevel=3)
result = crawleRetries(urlSum = sort_3_urlsum)
sort3Page = J360buySort3PageParser(result.content, sort_3_urlsum)
for prod in sort3Page.parserPageInfos():
print prod.logstr()
if __name__ == '__main__':
#test360BuyAllSortPage()
#testSort3Page()
testSort3Details()
| [
[
8,
0,
0.0324,
0.0324,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0556,
0.0046,
0,
0.66,
0.037,
275,
0,
1,
0,
0,
275,
0,
0
],
[
1,
0,
0.0602,
0.0046,
0,
0.66,... | [
"'''\nCreated on 2011-7-29\n\n主要用于从网站上爬取信息后,抽取页面信息;\n\n@author: zhongfeng\n'''",
"from j360buy.image_price import captcha_360buy",
"from pageparser import *",
"from threadpool import ThreadPool, WorkRequest",
"import json",
"import os",
"import re",
"import string",
"import threading",
"import url... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-16
@author: zhongfeng
'''
J360buy_FEATURES_MAP__ = {
'''
__
__
__
__
__
__
##
##
'''
:
'.',
'''
_####_
##__##
#___#_
##__##
#___#_
##__##
#___##
_###__
'''
:
'0',
'''
_####_
##__##
#___#_
##__##
#___#_
##__##
##__##
_###__
'''
:
'0',
'''
__##__
_#_#__
__##__
__##__
__#___
__##__
__#___
_####_
'''
:
'1',
'''
##__##
____#_
___##_
__#___
_##___
#_____
######
'''
:
'2',
'''
_####_
##__##
____#_
___##_
__#___
_##___
#_____
######
'''
:
'2',
'''
_####_
##__##
____#_
__##__
____##
____##
##__#_
_####_
'''
:
'3',
'''
___##_
__#_#_
_#_##_
#__#__
######
____#_
___##_
'''
:
'4',
'''
____#_
___##_
__#_#_
_#_##_
#__#__
######
____#_
___##_
'''
:
'4',
'''
_#####
_#____
_##___
_#_##_
____##
____#_
##__##
_###__
'''
:
'5',
'''
__###_
_##___
#_____
#####_
#___##
##__#_
#___##
_###__
'''
:
'6',
'''
######
____#_
___##_
___#__
__##__
__#___
_##___
_#____
'''
:
'7',
'''
_####_
##__##
#___#_
_###__
##__##
##__##
#___#_
_####_
'''
:
'8',
'''
_####_
##__##
#___#_
_###__
##__##
#___##
##__#_
_####_
'''
:
'8',
'''
_####_
##__##
#___#_
##__##
_##_##
____##
___#__
_###__
'''
:
'9',
'''
_####_
##__##
#___#_
##__##
_###_#
____##
___#__
_###__
'''
:
'9',
} | [
[
8,
0,
0.0282,
0.0235,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.5258,
0.9531,
0,
0.66,
1,
637,
0,
0,
0,
0,
0,
6,
0
]
] | [
"'''\nCreated on 2011-8-16\n\n@author: zhongfeng\n'''",
"J360buy_FEATURES_MAP__ = {\n '''\n __\n __\n __\n __\n __\n __"
] |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-7-26
京东价格图片识别模块
@author: zhongfeng
'''
import ImageFilter, ImageChops
from captcha_price import *
from j360buy.j360_feature import J360buy_FEATURES_MAP__
import Image
import re
import time
try:
import psyco
psyco.full()
except ImportError:
pass
class CaptchaProfile_360Buy(CaptchaProfile):
def __init__(self,features_map = J360buy_FEATURES_MAP__):
super(CaptchaProfile_360Buy,self).__init__(features_map)
def __new__(cls,features_map = J360buy_FEATURES_MAP__):
return super(CaptchaProfile_360Buy, cls).__new__(cls,features_map)
def split(self, im,top = 3,bottom = 11):
matrix = {(48,12) : [(15, 3, 21, 11), (23, 3, 25, 11),(27,3,33,11),(35,3,41,11)],
(52,12) : [(15, 3, 21, 11), (23, 3, 29, 11),(31,3,33,11),(35,3,41,11),(43,3,49,11)],
(65,12) : [(15, 3, 21, 11), (23, 3, 29, 11),(31,3,37,11),(39,3,41,11),(43,3,49,11),(51,3,57,11)],
(75,12) : [(15, 3, 21, 11), (23, 3, 29, 11),(31,3,37,11),(39,3,45,11),(47,3,49,11),(51,3,57,11),(59, 3, 65, 11)],
(80,12) : [(15, 3, 21, 11), (23, 3, 29, 11),(31,3,37,11),(39,3,45,11),(47,3,53,11),(55,3,57,11),(59, 3, 65, 11),(67,3,73,11)]
}
return [im.crop(box) for box in matrix[im.size]]
def captcha_360buy(filename):
return captcha(filename, CaptchaProfile_360Buy())
def test():
print captcha_360buy(r'c:\gp359329,2.png')
if __name__ == '__main__':
im = Image.open(r'c:\1.png')
im2 = Image.open(r'c:\1.png')
diff = ImageChops.difference(im, im2)
im = im.filter(ImageFilter.EDGE_ENHANCE_MORE).convert('L').convert('1')
dt = im.getdata()
print im.size
it1 = im.crop((15, 3, 21, 11))
it2 = im.crop((23, 3, 29, 11))
it3 = im.crop((31, 3, 37, 11))
it4 = im.crop((39, 3, 45, 11))
it5 = im.crop((47, 3, 49, 11))
it6 = im.crop((51, 3, 57, 11))
it7 = im.crop((59, 3, 65, 11))
cia = CaptchaImageAlgorithm()
s7 = cia.GetBinaryMap(it1)
print s7
profile = CaptchaProfile_360Buy()
print '+++++++++++++++++++++++++++'
for t in range(100):
print captcha_360buy(r'c:\5.png')
| [
[
8,
0,
0.0972,
0.0972,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1667,
0.0139,
0,
0.66,
0.0909,
739,
0,
2,
0,
0,
739,
0,
0
],
[
1,
0,
0.1806,
0.0139,
0,
0.66... | [
"'''\nCreated on 2011-7-26\n\n京东价格图片识别模块\n\n@author: zhongfeng\n'''",
"import ImageFilter, ImageChops",
"from captcha_price import *",
"from j360buy.j360_feature import J360buy_FEATURES_MAP__",
"import Image",
"import re",
"import time",
"try:\n import psyco\n psyco.full()\nexcept ImportError:\n... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-1
@author: zhongfeng
'''
from j360buy.j360pageparser import parserDict,j360buyRoot
from spider import main
if __name__ == '__main__':
main(j360buyRoot,parserDict)
| [
[
8,
0,
0.3889,
0.3889,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.6667,
0.0556,
0,
0.66,
0.3333,
666,
0,
2,
0,
0,
666,
0,
0
],
[
1,
0,
0.7222,
0.0556,
0,
0.66... | [
"'''\nCreated on 2011-8-1\n\n@author: zhongfeng\n\n\n'''",
"from j360buy.j360pageparser import parserDict,j360buyRoot",
"from spider import main",
"if __name__ == '__main__':\n\n main(j360buyRoot,parserDict)",
" main(j360buyRoot,parserDict)"
] |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-16
@author: zhongfeng
'''
J360buy_FEATURES_MAP__ = {
'''
__
__
__
__
__
__
##
##
'''
:
'.',
'''
_####_
##__##
#___#_
##__##
#___#_
##__##
#___##
_###__
'''
:
'0',
'''
_####_
##__##
#___#_
##__##
#___#_
##__##
##__##
_###__
'''
:
'0',
'''
__##__
_#_#__
__##__
__##__
__#___
__##__
__#___
_####_
'''
:
'1',
'''
##__##
____#_
___##_
__#___
_##___
#_____
######
'''
:
'2',
'''
_####_
##__##
____#_
___##_
__#___
_##___
#_____
######
'''
:
'2',
'''
_####_
##__##
____#_
__##__
____##
____##
##__#_
_####_
'''
:
'3',
'''
___##_
__#_#_
_#_##_
#__#__
######
____#_
___##_
'''
:
'4',
'''
____#_
___##_
__#_#_
_#_##_
#__#__
######
____#_
___##_
'''
:
'4',
'''
_#####
_#____
_##___
_#_##_
____##
____#_
##__##
_###__
'''
:
'5',
'''
__###_
_##___
#_____
#####_
#___##
##__#_
#___##
_###__
'''
:
'6',
'''
######
____#_
___##_
___#__
__##__
__#___
_##___
_#____
'''
:
'7',
'''
_####_
##__##
#___#_
_###__
##__##
##__##
#___#_
_####_
'''
:
'8',
'''
_####_
##__##
#___#_
_###__
##__##
#___##
##__#_
_####_
'''
:
'8',
'''
_####_
##__##
#___#_
##__##
_##_##
____##
___#__
_###__
'''
:
'9',
'''
_####_
##__##
#___#_
##__##
_###_#
____##
___#__
_###__
'''
:
'9',
} | [
[
8,
0,
0.0282,
0.0235,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.5258,
0.9531,
0,
0.66,
1,
637,
0,
0,
0,
0,
0,
6,
0
]
] | [
"'''\nCreated on 2011-8-16\n\n@author: zhongfeng\n'''",
"J360buy_FEATURES_MAP__ = {\n '''\n __\n __\n __\n __\n __\n __"
] |
#!/usr/bin/env python
# -*- coding: utf8 -*-
from logfacade import LoggerFactory
'''
Created on 2011-10-9
@author: zhongfeng
'''
from crawlerhttp import crawle
import re
from threadpool import ThreadPool, WorkRequest
from dbproc.basedbproc import getConnect,MySQLQueryPagination
def getProdImgUrlFromProdId(*prodId):
rawId,id = prodId[0]
baseUrl = r'http://www.360buy.com/lishiset.aspx?callback=jdRecent.setData&id=%s'
url = baseUrl % rawId
result = crawle(url)
imgUrl = ''
if result.code == 200:
ct = re.compile(r'{.*}').search(result.content)
if ct != None:
jd = eval(ct.group())
imgUrl = jd['img']
imgUrl = imgUrl.replace('n5', 'n2',1)
return id,imgUrl
def proc_result(request, result):
logger = LoggerFactory.getLogger(logName='360buy')
logger.info(result)
def main_crawle(prodIdList,thread_num = 20):
try:
pool = ThreadPool(thread_num)
for prodId in prodIdList:
req = WorkRequest(getProdImgUrlFromProdId, [prodId], None,
callback=proc_result)
pool.putRequest(req)
pool.wait()
except Exception,e:
raise e
finally:
pool.dismissWorkers(num_workers=thread_num)
if __name__ == '__main__':
conn = getConnect()
queryPagin = MySQLQueryPagination(conn,numPerPage = 1000)
sql = 'SELECT raw_id,id FROM `prod_base_info_3c` where site_id = 6'
for prodIds in queryPagin.queryForList(sql):
main_crawle(prodIds)
conn.close()
| [
[
1,
0,
0.0556,
0.0185,
0,
0.66,
0,
602,
0,
1,
0,
0,
602,
0,
0
],
[
8,
0,
0.1481,
0.0926,
0,
0.66,
0.1111,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2222,
0.0185,
0,
0.66... | [
"from logfacade import LoggerFactory",
"'''\nCreated on 2011-10-9\n\n@author: zhongfeng\n'''",
"from crawlerhttp import crawle",
"import re",
"from threadpool import ThreadPool, WorkRequest",
"from dbproc.basedbproc import getConnect,MySQLQueryPagination",
"def getProdImgUrlFromProdId(*prodId):\n raw... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
from logfacade import LoggerFactory
'''
Created on 2011-10-9
@author: zhongfeng
'''
from crawlerhttp import crawle
import re
from threadpool import ThreadPool, WorkRequest
from dbproc.basedbproc import getConnect,MySQLQueryPagination
def getProdImgUrlFromProdId(*prodId):
rawId,id = prodId[0]
baseUrl = r'http://www.360buy.com/lishiset.aspx?callback=jdRecent.setData&id=%s'
url = baseUrl % rawId
result = crawle(url)
imgUrl = ''
if result.code == 200:
ct = re.compile(r'{.*}').search(result.content)
if ct != None:
jd = eval(ct.group())
imgUrl = jd['img']
imgUrl = imgUrl.replace('n5', 'n2',1)
return id,imgUrl
def proc_result(request, result):
logger = LoggerFactory.getLogger(logName='360buy')
logger.info(result)
def main_crawle(prodIdList,thread_num = 20):
try:
pool = ThreadPool(thread_num)
for prodId in prodIdList:
req = WorkRequest(getProdImgUrlFromProdId, [prodId], None,
callback=proc_result)
pool.putRequest(req)
pool.wait()
except Exception,e:
raise e
finally:
pool.dismissWorkers(num_workers=thread_num)
if __name__ == '__main__':
conn = getConnect()
queryPagin = MySQLQueryPagination(conn,numPerPage = 1000)
sql = 'SELECT raw_id,id FROM `prod_base_info_3c` where site_id = 6'
for prodIds in queryPagin.queryForList(sql):
main_crawle(prodIds)
conn.close()
| [
[
1,
0,
0.0556,
0.0185,
0,
0.66,
0,
602,
0,
1,
0,
0,
602,
0,
0
],
[
8,
0,
0.1481,
0.0926,
0,
0.66,
0.1111,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2222,
0.0185,
0,
0.66... | [
"from logfacade import LoggerFactory",
"'''\nCreated on 2011-10-9\n\n@author: zhongfeng\n'''",
"from crawlerhttp import crawle",
"import re",
"from threadpool import ThreadPool, WorkRequest",
"from dbproc.basedbproc import getConnect,MySQLQueryPagination",
"def getProdImgUrlFromProdId(*prodId):\n raw... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-1
@author: zhongfeng
'''
from j360buy.j360pageparser import parserDict,j360buyRoot
from spider import main
if __name__ == '__main__':
main(j360buyRoot,parserDict)
| [
[
8,
0,
0.3889,
0.3889,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.6667,
0.0556,
0,
0.66,
0.3333,
666,
0,
2,
0,
0,
666,
0,
0
],
[
1,
0,
0.7222,
0.0556,
0,
0.66... | [
"'''\nCreated on 2011-8-1\n\n@author: zhongfeng\n\n\n'''",
"from j360buy.j360pageparser import parserDict,j360buyRoot",
"from spider import main",
"if __name__ == '__main__':\n\n main(j360buyRoot,parserDict)",
" main(j360buyRoot,parserDict)"
] |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-9-24
@author: zhongfeng
'''
import re,os,sys
import chardet
from pageparser import ObuyUrlSummary
from utils import Singleton
from ConfigParser import ConfigParser, NoOptionError
def __getUrlSumsFromSection(section):
curPath = os.path.abspath(os.path.dirname(sys.argv[0]))
fileName = os.path.join(curPath, 'urls.cfg')
print 'spider.cfg full path:%s' % fileName
urls = list()
if not os.path.exists(fileName):
return urls
regx = r'\[%s\]' % section
includeSecRegx = re.compile(regx)
otherSecRegx = re.compile(r'\[.*\]')
flag = False
with file(fileName) as inputFile:
for line in inputFile:
encoding = chardet.detect(line)['encoding']
line = line.decode(encoding,'ignore')
if (not flag) and includeSecRegx.match(line):
flag = True
elif flag:
if otherSecRegx.match(line):
break
if line.strip() != '':
line = ' '.join(line.split())
ret = line.split(',')
ret = [it.strip() for it in ret]
urlSumm = ObuyUrlSummary(name = ret[1],url = ret[0],catagoryLevel = int(ret[2]))
urls.append(urlSumm)
return urls
def getIncludeUrlSums():
return __getUrlSumsFromSection('include')
def getExcludeUrlSums():
return __getUrlSumsFromSection('exclude')
class SpiderConfig(Singleton):
@classmethod
def _init(cls):
curPath = os.path.abspath(os.path.dirname(sys.argv[0]))
fileName = os.path.join(curPath, 'spider.conf')
cls.cf = ConfigParser()
cls.cf.read(fileName)
@classmethod
def getConfig(cls,option):
if not hasattr(cls, 'cf'):
cls._init()
try:
return cls.cf.get('conf',option)
except Exception:
pass
@classmethod
def getMaxPage(cls):
ret = cls.getConfig('max_page')
if ret:
return int(ret)
return 50
@classmethod
def getThreadNum(cls):
threadNum = cls.getConfig('thread_num')
if threadNum:
return int(threadNum)
return 10
@classmethod
def getProxy(cls):
return cls.getConfig('ftp_proxy')
@classmethod
def isStartSpider(cls):
flag = int(cls.getConfig('is_spider'))
if flag is not None:
return flag
return True
@classmethod
def isUpload(cls):
flag = int(cls.getConfig('is_upload'))
if flag is not None:
return flag
return True
if __name__ == '__main__':
print SpiderConfig.getMaxPage()
print SpiderConfig.getThreadNum()
print SpiderConfig.getProxy()
| [
[
8,
0,
0.0577,
0.0481,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0962,
0.0096,
0,
0.66,
0.1,
540,
0,
3,
0,
0,
540,
0,
0
],
[
1,
0,
0.1058,
0.0096,
0,
0.66,
... | [
"'''\nCreated on 2011-9-24\n\n@author: zhongfeng\n'''",
"import re,os,sys",
"import chardet",
"from pageparser import ObuyUrlSummary",
"from utils import Singleton",
"from ConfigParser import ConfigParser, NoOptionError",
"def __getUrlSumsFromSection(section):\n curPath = os.path.abspath(os.path.dirn... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-7-26
图片识别模块
@author: zhongfeng
'''
import ImageFilter, ImageChops
import Image
import re,itertools
import time
try:
import psyco
psyco.full()
except ImportError:
pass
class CaptchaAlgorithm(object):
'''captcha algorithm'''
def LevenshteinDistance(self, m, n):
c = [[i] for i in range(0, len(m) + 1)]
c[0] = [j for j in range(0, len(n) + 1)]
for i in range(0, len(m)):
for j in range(0, len(n)):
c[i + 1].append(
min(
c[i][j + 1] + 1,
c[i + 1][j] + 1,
c[i][j] + (0 if m[i] == n[j] else 1)
)
)
return c[-1][-1]
class CaptchaImageAlgorithm(object):
'''captcha image algorithm'''
@staticmethod
def GetPixelsXEdges(im):
pixels = im.load()
xsize, ysize = im.size
state = -1
edges = []
for x in xrange(xsize):
weight = sum(1 if pixels[x, y] == 0 else 0 for y in xrange(ysize))
level = 0
if state == -1 and weight <= level:
continue
elif state == 1 and weight > level:
continue
else:
state = -state
edges.append(x)
return [(edges[x], edges[x + 1]) for x in range(0, len(edges), 2)]
@staticmethod
def GetPixelsYEdges(im):
pixels = im.load()
xsize, ysize = im.size
state = -1
edges = []
for y in xrange(ysize):
weight = sum(1 if pixels[x, y] == 0 else 0 for x in xrange(xsize))
level = 0
if state == -1 and weight <= level:
continue
elif state == 1 and weight > level:
continue
else:
state = -state
edges.append(y)
return [(edges[x], edges[x + 1]) for x in range(0, len(edges), 2)]
@staticmethod
def StripYEdge(im):
yedges = CaptchaImageAlgorithm.GetPixelsYEdges(im)
y1, y2 = yedges[0][0], yedges[-1][1]
return im.crop((0, y1, im.size[0], y2))
@staticmethod
def GetBinaryMap(im):
xsize, ysize = im.size
pixels = im.load()
return '\n'.join(''.join('#' if pixels[x, y] == 0 else '_' for x in xrange(xsize)) for y in xrange(ysize))
@staticmethod
def getBitMapIn(im):
xsize, ysize = im.size
pixels = im.load()
return tuple( 0 if pixels[x, y] == 0 else 255 for x in xrange(xsize) for y in xrange(ysize))
class CaptchaProfile(object):
def __init__(self,features_map):
self.features_map = features_map
def __new__(cls,features_map):
'''
单态实现,初始化一次
'''
if '_inst' not in vars(cls):
cls.catagory_FEATURES_MAP__ = dict([(feature_to_data(key),value) for key,value in features_map.iteritems()])
cls._inst = super(CaptchaProfile, cls).__new__(cls)
return cls._inst
def match(self, im):
#st = time.time()
imageData = feature_to_data(CaptchaImageAlgorithm.GetBinaryMap(im))
result = self.catagory_FEATURES_MAP__.get(imageData,None)
if result != None:
return result
print CaptchaImageAlgorithm.GetBinaryMap(im),'\n'
source = im.getdata()
algorithm = CaptchaAlgorithm()
minimal = min(self.features_map, key=lambda feature:algorithm.LevenshteinDistance(source, feature_to_data(feature)))
result = self.features_map[minimal]
self.catagory_FEATURES_MAP__[imageData] = result
return result
def filter(self, im):
return im.filter(ImageFilter.EDGE_ENHANCE_MORE).convert('L').convert('1')
def splitAgorim(self, im, top,bottom):
xsize, ysize = im.size
pixels = im.load()
zeroArr = []
for x in xrange(xsize):
flag = True
for y in xrange(ysize):
if pixels[x,y] != 255:
flag = False
break
if flag or x == 0:
zeroArr.append(x)
zeroArr = [(value - index ,value) for index,value in enumerate(zeroArr)]
retd = []
for key, group in itertools.groupby(zeroArr, lambda x: x[0]):
ret = [t[1] for t in group]
retd.append((ret[0],ret[-1]))
l = len(retd)
i = 0
dd = []
while i < l - 1 :
pre = retd[i][1] + 1
next = retd[i + 1][0]
# if 2 < next - pre < 7:
# nPre = retd[i + 1][1]
# nNext = retd[i + 2][0]
# if 2 < nNext - nPre < 7:
# dd.append((pre,4,nNext,16))
# i = i + 2
# continue
# print (pre,4,next,16)
dd.append((pre,top,next,bottom))
i = i + 1
return dd
def split(self, im, top,bottom):
ddArr = self.splitAgorim(im, top, bottom)
return (im.crop(idt) for idt in ddArr[1:])
def feature_to_data(feature):
feature = re.sub(r'[\t\s]', '', feature)
feature = re.sub(r'[\r\n]', '', feature)
return tuple(0 if x == '#' else 255 for x in feature)
def captcha(filename, profile):
#s = time.time()
im = Image.open(filename)
#s2 = time.time()
#print 'open',s2-s
im = profile.filter(im)
#s3 = time.time()
#print 'filter',s3 - s2
im_list = profile.split(im)
#s4 = time.time()
#print 'split',s4 - s3
result = ''.join(profile.match(im) for im in im_list)
#print 'match',time.time() - s4
return result
| [
[
8,
0,
0.0374,
0.0374,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0642,
0.0053,
0,
0.66,
0.1,
739,
0,
2,
0,
0,
739,
0,
0
],
[
1,
0,
0.0695,
0.0053,
0,
0.66,
... | [
"'''\nCreated on 2011-7-26\n\n图片识别模块\n\n@author: zhongfeng\n'''",
"import ImageFilter, ImageChops",
"import Image",
"import re,itertools",
"import time",
"try:\n import psyco\n psyco.full()\nexcept ImportError:\n pass",
" import psyco",
" psyco.full()",
"class CaptchaAlgorithm(object)... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-7-26
京东价格图片识别模块
@author: zhongfeng
'''
import ImageFilter, ImageChops
from captcha_price import *
from medicine.kxr.kxr_feature import KXR_FEATURES_MAP__
import Image
import os,sys
try:
import psyco
psyco.full()
except ImportError:
pass
class CaptchaProfile_kxr(CaptchaProfile):
def __init__(self,features_map = KXR_FEATURES_MAP__):
super(CaptchaProfile_kxr,self).__init__(features_map)
def __new__(cls,features_map = KXR_FEATURES_MAP__):
return super(CaptchaProfile_kxr, cls).__new__(cls,features_map)
def getStandardArrNum():
curPath = os.path.dirname(sys.argv[0])
imStand = os.path.join(curPath,'price2.png')
im = Image.open(imStand)
#print im.size
#cia = CaptchaImageAlgorithm()
cpKxr = CaptchaProfile_kxr()
ckDict = {}
for i in xrange(10):
for j in xrange(11):
xt = 14*(j+1)
if xt > 146:
xt = 146
px = (14*j,16 + (i-1)*15,xt,15 + i*15)
seg = im.crop(px)
ckDict[(px[0],px[1] - 1)] = cpKxr.match(seg)
return ckDict
class CkDict(object):
ckDict = None
@classmethod
def getCapNum(cls,px):
if cls.ckDict is None:
cls.ckDict = getStandardArrNum()
return cls.ckDict.get(px,None)
if __name__ == '__main__':
while True:
getStandardArrNum()
#===========================================================================
# im = Image.open(r'c:\1.png')
# im2 = Image.open(r'c:\1.png')
# dt = im.getdata()
# print im.size
# it1 = im.crop((15, 3, 21, 11))
# it2 = im.crop((23, 3, 29, 11))
# it3 = im.crop((31, 3, 37, 11))
# it4 = im.crop((39, 3, 45, 11))
# it5 = im.crop((47, 3, 49, 11))
# it6 = im.crop((51, 3, 57, 11))
# it7 = im.crop((59, 3, 65, 11))
# cia = CaptchaImageAlgorithm()
# s7 = cia.GetBinaryMap(it1)
# print s7
#===========================================================================
| [
[
8,
0,
0.0886,
0.0886,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1519,
0.0127,
0,
0.66,
0.1,
739,
0,
2,
0,
0,
739,
0,
0
],
[
1,
0,
0.1646,
0.0127,
0,
0.66,
... | [
"'''\nCreated on 2011-7-26\n\n京东价格图片识别模块\n\n@author: zhongfeng\n'''",
"import ImageFilter, ImageChops",
"from captcha_price import *",
"from medicine.kxr.kxr_feature import KXR_FEATURES_MAP__",
"import Image",
"import os,sys",
"try:\n import psyco\n psyco.full()\nexcept ImportError:\n pass",
... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2012-02-29
@author: zhongfeng
'''
KXR_FEATURES_MAP__ = {
'''
______
______
______
______
______
______
______
______
______
______
______
__##__
__##__
______
'''
:
'.',
'''
____####______
___##___##____
__##_____##___
__##_____##___
__##_____##___
__##_____##___
__##_____##___
__##_____##___
__##_____##___
__##_____##___
__##_____##___
___##___##____
____####______
____####______
'''
:
'0',
'''
______##______
_____###______
__######______
______##______
______##______
______##______
______##______
______##______
______##______
______##______
______##______
______##______
__#########___
____####______
'''
:
'1',
'''
___######_____
________##____
________###___
________###___
________###___
________##____
________#_____
_______#______
______________
______________
____##________
___###________
__##########__
______####____
'''
:
'2',
'''
____######____
_________##___
_________###__
_________##___
_________##___
_________#____
______####____
_________#____
__________##__
__________##__
__________##__
_________##___
____#####_____
____#####_____
'''
:
'3',
'''
________###___
_______####___
______#####___
_____#__###___
_____#__###___
____#___###___
___#____###___
__#########___
__###########_
________###___
________###___
________###___
________###___
________#_____
'''
:
'4',
'''
__#########___
__###_________
__###_________
__###_________
__###_________
__#######_____
________##____
_________##___
_________##___
_________##___
_________##___
________##____
___#####______
_______#______
'''
:
'5',
'''
______####____
____#_________
___#__________
__##__________
__##__________
__#######_____
__##_____##___
_###_____##___
__##_____##___
__##_____##___
__##_____##___
___##___##____
_____####_____
_____####_____
'''
:
'6',
'''
__#########___
_________##___
_________##___
_________#____
________##____
_______##_____
_______#______
______##______
______#_______
_____##_______
_____#________
____##________
___##_________
___##_________
'''
:
'7',
'''
____#####_____
__##____##____
__##_____##___
__##_____##___
___#__________
____#___#_____
____#####_____
_________#____
__##_____##___
_###_____##___
_###_____##___
__##_____#____
____#####_____
_____####_____
'''
:
'8',
'''
_____####_____
____#____##___
___##_____#___
__###_____##__
__###_____##__
___##_____##__
___##_____##__
____########__
__________##__
_________##___
_________##___
________##____
___#####______
______##______
'''
:
'9',
} | [
[
8,
0,
0.0274,
0.0228,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.5251,
0.9543,
0,
0.66,
1,
713,
0,
0,
0,
0,
0,
6,
0
]
] | [
"'''\nCreated on 2012-02-29\n\n@author: zhongfeng\n'''",
"KXR_FEATURES_MAP__ = {\n '''\n______\n______\n______\n______\n______\n______"
] |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-7-26
京东价格图片识别模块
@author: zhongfeng
'''
import ImageFilter, ImageChops
from captcha_price import *
from medicine.kxr.kxr_feature import KXR_FEATURES_MAP__
import Image
import os,sys
try:
import psyco
psyco.full()
except ImportError:
pass
class CaptchaProfile_kxr(CaptchaProfile):
def __init__(self,features_map = KXR_FEATURES_MAP__):
super(CaptchaProfile_kxr,self).__init__(features_map)
def __new__(cls,features_map = KXR_FEATURES_MAP__):
return super(CaptchaProfile_kxr, cls).__new__(cls,features_map)
def getStandardArrNum():
curPath = os.path.dirname(sys.argv[0])
imStand = os.path.join(curPath,'price2.png')
im = Image.open(imStand)
#print im.size
#cia = CaptchaImageAlgorithm()
cpKxr = CaptchaProfile_kxr()
ckDict = {}
for i in xrange(10):
for j in xrange(11):
xt = 14*(j+1)
if xt > 146:
xt = 146
px = (14*j,16 + (i-1)*15,xt,15 + i*15)
seg = im.crop(px)
ckDict[(px[0],px[1] - 1)] = cpKxr.match(seg)
return ckDict
class CkDict(object):
ckDict = None
@classmethod
def getCapNum(cls,px):
if cls.ckDict is None:
cls.ckDict = getStandardArrNum()
return cls.ckDict.get(px,None)
if __name__ == '__main__':
while True:
getStandardArrNum()
#===========================================================================
# im = Image.open(r'c:\1.png')
# im2 = Image.open(r'c:\1.png')
# dt = im.getdata()
# print im.size
# it1 = im.crop((15, 3, 21, 11))
# it2 = im.crop((23, 3, 29, 11))
# it3 = im.crop((31, 3, 37, 11))
# it4 = im.crop((39, 3, 45, 11))
# it5 = im.crop((47, 3, 49, 11))
# it6 = im.crop((51, 3, 57, 11))
# it7 = im.crop((59, 3, 65, 11))
# cia = CaptchaImageAlgorithm()
# s7 = cia.GetBinaryMap(it1)
# print s7
#===========================================================================
| [
[
8,
0,
0.0886,
0.0886,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1519,
0.0127,
0,
0.66,
0.1,
739,
0,
2,
0,
0,
739,
0,
0
],
[
1,
0,
0.1646,
0.0127,
0,
0.66,
... | [
"'''\nCreated on 2011-7-26\n\n京东价格图片识别模块\n\n@author: zhongfeng\n'''",
"import ImageFilter, ImageChops",
"from captcha_price import *",
"from medicine.kxr.kxr_feature import KXR_FEATURES_MAP__",
"import Image",
"import os,sys",
"try:\n import psyco\n psyco.full()\nexcept ImportError:\n pass",
... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2012-02-29
@author: zhongfeng
'''
KXR_FEATURES_MAP__ = {
'''
______
______
______
______
______
______
______
______
______
______
______
__##__
__##__
______
'''
:
'.',
'''
____####______
___##___##____
__##_____##___
__##_____##___
__##_____##___
__##_____##___
__##_____##___
__##_____##___
__##_____##___
__##_____##___
__##_____##___
___##___##____
____####______
____####______
'''
:
'0',
'''
______##______
_____###______
__######______
______##______
______##______
______##______
______##______
______##______
______##______
______##______
______##______
______##______
__#########___
____####______
'''
:
'1',
'''
___######_____
________##____
________###___
________###___
________###___
________##____
________#_____
_______#______
______________
______________
____##________
___###________
__##########__
______####____
'''
:
'2',
'''
____######____
_________##___
_________###__
_________##___
_________##___
_________#____
______####____
_________#____
__________##__
__________##__
__________##__
_________##___
____#####_____
____#####_____
'''
:
'3',
'''
________###___
_______####___
______#####___
_____#__###___
_____#__###___
____#___###___
___#____###___
__#########___
__###########_
________###___
________###___
________###___
________###___
________#_____
'''
:
'4',
'''
__#########___
__###_________
__###_________
__###_________
__###_________
__#######_____
________##____
_________##___
_________##___
_________##___
_________##___
________##____
___#####______
_______#______
'''
:
'5',
'''
______####____
____#_________
___#__________
__##__________
__##__________
__#######_____
__##_____##___
_###_____##___
__##_____##___
__##_____##___
__##_____##___
___##___##____
_____####_____
_____####_____
'''
:
'6',
'''
__#########___
_________##___
_________##___
_________#____
________##____
_______##_____
_______#______
______##______
______#_______
_____##_______
_____#________
____##________
___##_________
___##_________
'''
:
'7',
'''
____#####_____
__##____##____
__##_____##___
__##_____##___
___#__________
____#___#_____
____#####_____
_________#____
__##_____##___
_###_____##___
_###_____##___
__##_____#____
____#####_____
_____####_____
'''
:
'8',
'''
_____####_____
____#____##___
___##_____#___
__###_____##__
__###_____##__
___##_____##__
___##_____##__
____########__
__________##__
_________##___
_________##___
________##____
___#####______
______##______
'''
:
'9',
} | [
[
8,
0,
0.0274,
0.0228,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.5251,
0.9543,
0,
0.66,
1,
713,
0,
0,
0,
0,
0,
6,
0
]
] | [
"'''\nCreated on 2012-02-29\n\n@author: zhongfeng\n'''",
"KXR_FEATURES_MAP__ = {\n '''\n______\n______\n______\n______\n______\n______"
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-8-27
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from pageparser import *
from spiderconfigparser import SpiderConfig
from crawlerhttp import crawleRetries
icsonRoot = ObuyUrlSummary(url=r'http://www.icson.com/portal.html', name='icson',
isRecursed=True, catagoryLevel=0)
class IcsonAllSortParser(RootCatagoryPageParser):
'''
从http://sz.icson.com/portal.html获取所有的分类信息,
组合成ObuyUrlSummary
'''
mainHost = 'http://sz.icson.com/'
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(IcsonAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def getBaseSort3UrlSums(self):
finalUrlList = []
allSort = self.soup.find(attrs={'id':'protal_list'})
for t in allSort.findAll(name='div',attrs={'class':'item_hd'}):#一级分类
name,url = ParserUtils.parserTag_A(t.find(name='a'))
sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=False)
sort_2 = t.findNextSibling(name='div',attrs={'class':'item_bd'})
for tt in sort_2(name='dl'):#二级分类
name = tt.dt.getText()
url = ''.join((self.mainHost,name))
sort_2_urlsum = self.buildSort_N(url, name, sort_1_urlsum, isCrawle=False)
for ttt in tt.findAll(name='a'):#三级分类
name, url = ParserUtils.parserTag_A(ttt)
sort_3_urlsum = self.buildSort_N(url, name, sort_2_urlsum,firstFinalPage=True)
finalUrlList.append(sort_3_urlsum)
return finalUrlList
class IcsonSort3PageParser(Sort3PageParser):
'''
三级页面解析类
'''
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(IcsonSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def nextPageUrlPattern(self):
urlSegs = self.rootUrlSummary.url.rsplit('.', 1)
pageSeg = '-0-6-10-20-0-{}--'
return '%s%s.%s' % (urlSegs[0].replace('--------',''), pageSeg, urlSegs[1])
def getTotal(self):
nextSeg = self.soup.find(name='a',attrs={'class':'page-next'})
if nextSeg != None:
t = nextSeg.findPreviousSibling(name='a').getText()
totalPage = int(t)
else:
totalPage = 1
if totalPage > SpiderConfig.getMaxPage():
totalPage = SpiderConfig.getMaxPage()
return totalPage
def parserPageInfos(self):
plist = self.soup.findAll(name='li',attrs={'class':'item_list'})
resultList = []
for prod in plist:
pNameSeg = prod.find(attrs={'class':'wrap_info'})
pName,url = ParserUtils.parserTag_A(pNameSeg.a)
hotWords = pNameSeg.find(name='p',attrs={'class':'hot'}).getText()
adWords = hotWords
#===================================================================
# exGiftSeg = prod.find(name='ul',attrs = {'class':'list_gifts'})
# if exGiftSeg:
# allGift = []
# for index ,gift in enumerate(exGiftSeg(name = 'li')):
# eGift = '%s.%s' % (index ,gift.getText())
# allGift.append(eGift)
# adWords = '%s@%s' % (hotWords,''.join(allGift))
# print adWords
#===================================================================
pid = url.rsplit('-',1)[-1].split('.')[0]
t = prod.find(attrs={'class':'price_icson'})
if t != None:
currentPrice = ParserUtils.getPrice(t.getText())
else:
currentPrice = 0.00
commSeg = prod.find(name = 'p',attrs={'class':'comment'})
repu = 0.0
evalNum = 0
if commSeg:
repuSeg = commSeg.find(name = 'span',attrs = {'class':'icon_star'})
if repuSeg:
repu = ParserUtils.getDigit(repuSeg.b['style'])
repu = float(repu) * 5 / 100
evalNum = ParserUtils.getDigit(commSeg.a.getText())
imgSeg = prod.find(name='a',attrs={'class':'link_pic'})
imgUrl = ParserUtils.getImgUrl(imgSeg)
#evlNum = ParserUtils.getDigit(prod.find(name='p',attrs={'class':'comment'}).getText())
prodDetail = ProductDetails(productId=pid, fullUrl=url,imageUrl=imgUrl,privPrice = currentPrice,
name=pName, adWords=adWords,reputation=repu,evaluateNum=evalNum)
prodDetail.catagory = self.rootUrlSummary
resultList.append(prodDetail)
return resultList
class IcsonSort4PageParser(IcsonSort3PageParser):
'''
分类四级页面为列表页面,只抽取Product信息
'''
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(IcsonSort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def parserSubUrlSums(self):
pass
parserDict = {0:IcsonAllSortParser, 3:IcsonSort3PageParser, 4:IcsonSort4PageParser}
''' test '''
import os
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')
def testIcsonAllSortPage():
fileName = os.path.join(testFilePath,'portal.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
rootUrlSum = ObuyUrlSummary(url=r'http://sz.icson.com/portal.html', name='Icson')
firstPage = IcsonAllSortParser(content, rootUrlSum)
for sort_3 in firstPage.parserSubUrlSums():
print sort_3.name,sort_3.url ,sort_3.catagoryLevel
def testSort3Page():
fileName = os.path.join(testFilePath,'icson_2011-08-27_14-13-21.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
sort_3_urlsum = ObuyUrlSummary(url=r'http://list.icson.com/311--------.html',
parentPath=[('test')], catagoryLevel=3)
sort3Page = IcsonSort3PageParser(content, sort_3_urlsum)
for sort_4 in sort3Page.getSort4PageUrlSums():
print sort_4.url
def testSort3Details():
fileName = os.path.join(testFilePath,'icson_2011-08-27_14-13-21.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
sort_3_urlsum = ObuyUrlSummary(url=r'http://list.icson.com/311--------.html',
parentPath=[('test')], catagoryLevel=3)
result = crawleRetries(sort_3_urlsum)
fileName = os.path.join(testFilePath,'icson_test.html')
with open(fileName, 'w') as fInput:
fInput.write(result.content)
sort3Page = IcsonSort3PageParser(result.content, sort_3_urlsum)
for product in sort3Page.parserPageInfos():
print product.logstr()
if __name__ == '__main__':
#testIcsonAllSortPage()
#testSort3Page()
testSort3Details()
| [
[
8,
0,
0.0414,
0.0414,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0769,
0.0059,
0,
0.66,
0.0625,
488,
0,
1,
0,
0,
488,
0,
0
],
[
1,
0,
0.0828,
0.0059,
0,
0.66... | [
"'''\nCreated on 2011-8-27\n\n主要用于从网站上爬取信息后,抽取页面信息;\n\n@author: zhongfeng\n'''",
"from pageparser import *",
"from spiderconfigparser import SpiderConfig",
"from crawlerhttp import crawleRetries",
"icsonRoot = ObuyUrlSummary(url=r'http://www.icson.com/portal.html', name='icson', \n isRecursed=True, c... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-25
@author: zhongfeng
'''
from icson.icsonpageparser import parserDict,icsonRoot
from spider import ObuySpider
from crawlerhttp import CrawlResult
import os,sys
from logfacade import LoggerFactory
from upload import fileUpload
from spiderconfigparser import SpiderConfig
def main(fileName):
with open(fileName, 'r') as fInput:
content = fInput.read()
rootResult = CrawlResult(code=200, content=content)
from spiderconfigparser import getIncludeUrlSums,getExcludeUrlSums
includes = getIncludeUrlSums()
excludes = getExcludeUrlSums()
spider = ObuySpider(rootUrlSummary=icsonRoot, parserDict=parserDict,include=includes,exclude = excludes,
rootPageResult=rootResult, threadNum=SpiderConfig.getThreadNum())
if SpiderConfig.isStartSpider():
spider.spide()
LoggerFactory.shutdown()
if SpiderConfig.isUpload():
fileUpload()
if __name__ == '__main__':
curPath = os.path.abspath(os.path.dirname(sys.argv[0]))
fileName = os.path.join(curPath, 'portal.html')
main(fileName)
| [
[
8,
0,
0.1757,
0.1622,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2973,
0.027,
0,
0.66,
0.1111,
965,
0,
2,
0,
0,
965,
0,
0
],
[
1,
0,
0.3243,
0.027,
0,
0.66,
... | [
"'''\nCreated on 2011-8-25\n\n@author: zhongfeng\n\n'''",
"from icson.icsonpageparser import parserDict,icsonRoot",
"from spider import ObuySpider",
"from crawlerhttp import CrawlResult",
"import os,sys",
"from logfacade import LoggerFactory",
"from upload import fileUpload",
"from spiderconfigparser ... |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-8-27
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from pageparser import *
from spiderconfigparser import SpiderConfig
from crawlerhttp import crawleRetries
icsonRoot = ObuyUrlSummary(url=r'http://www.icson.com/portal.html', name='icson',
isRecursed=True, catagoryLevel=0)
class IcsonAllSortParser(RootCatagoryPageParser):
'''
从http://sz.icson.com/portal.html获取所有的分类信息,
组合成ObuyUrlSummary
'''
mainHost = 'http://sz.icson.com/'
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(IcsonAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def getBaseSort3UrlSums(self):
finalUrlList = []
allSort = self.soup.find(attrs={'id':'protal_list'})
for t in allSort.findAll(name='div',attrs={'class':'item_hd'}):#一级分类
name,url = ParserUtils.parserTag_A(t.find(name='a'))
sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=False)
sort_2 = t.findNextSibling(name='div',attrs={'class':'item_bd'})
for tt in sort_2(name='dl'):#二级分类
name = tt.dt.getText()
url = ''.join((self.mainHost,name))
sort_2_urlsum = self.buildSort_N(url, name, sort_1_urlsum, isCrawle=False)
for ttt in tt.findAll(name='a'):#三级分类
name, url = ParserUtils.parserTag_A(ttt)
sort_3_urlsum = self.buildSort_N(url, name, sort_2_urlsum,firstFinalPage=True)
finalUrlList.append(sort_3_urlsum)
return finalUrlList
class IcsonSort3PageParser(Sort3PageParser):
'''
三级页面解析类
'''
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(IcsonSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def nextPageUrlPattern(self):
urlSegs = self.rootUrlSummary.url.rsplit('.', 1)
pageSeg = '-0-6-10-20-0-{}--'
return '%s%s.%s' % (urlSegs[0].replace('--------',''), pageSeg, urlSegs[1])
def getTotal(self):
nextSeg = self.soup.find(name='a',attrs={'class':'page-next'})
if nextSeg != None:
t = nextSeg.findPreviousSibling(name='a').getText()
totalPage = int(t)
else:
totalPage = 1
if totalPage > SpiderConfig.getMaxPage():
totalPage = SpiderConfig.getMaxPage()
return totalPage
def parserPageInfos(self):
plist = self.soup.findAll(name='li',attrs={'class':'item_list'})
resultList = []
for prod in plist:
pNameSeg = prod.find(attrs={'class':'wrap_info'})
pName,url = ParserUtils.parserTag_A(pNameSeg.a)
hotWords = pNameSeg.find(name='p',attrs={'class':'hot'}).getText()
adWords = hotWords
#===================================================================
# exGiftSeg = prod.find(name='ul',attrs = {'class':'list_gifts'})
# if exGiftSeg:
# allGift = []
# for index ,gift in enumerate(exGiftSeg(name = 'li')):
# eGift = '%s.%s' % (index ,gift.getText())
# allGift.append(eGift)
# adWords = '%s@%s' % (hotWords,''.join(allGift))
# print adWords
#===================================================================
pid = url.rsplit('-',1)[-1].split('.')[0]
t = prod.find(attrs={'class':'price_icson'})
if t != None:
currentPrice = ParserUtils.getPrice(t.getText())
else:
currentPrice = 0.00
commSeg = prod.find(name = 'p',attrs={'class':'comment'})
repu = 0.0
evalNum = 0
if commSeg:
repuSeg = commSeg.find(name = 'span',attrs = {'class':'icon_star'})
if repuSeg:
repu = ParserUtils.getDigit(repuSeg.b['style'])
repu = float(repu) * 5 / 100
evalNum = ParserUtils.getDigit(commSeg.a.getText())
imgSeg = prod.find(name='a',attrs={'class':'link_pic'})
imgUrl = ParserUtils.getImgUrl(imgSeg)
#evlNum = ParserUtils.getDigit(prod.find(name='p',attrs={'class':'comment'}).getText())
prodDetail = ProductDetails(productId=pid, fullUrl=url,imageUrl=imgUrl,privPrice = currentPrice,
name=pName, adWords=adWords,reputation=repu,evaluateNum=evalNum)
prodDetail.catagory = self.rootUrlSummary
resultList.append(prodDetail)
return resultList
class IcsonSort4PageParser(IcsonSort3PageParser):
'''
分类四级页面为列表页面,只抽取Product信息
'''
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(IcsonSort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def parserSubUrlSums(self):
pass
parserDict = {0:IcsonAllSortParser, 3:IcsonSort3PageParser, 4:IcsonSort4PageParser}
''' test '''
import os
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')
def testIcsonAllSortPage():
fileName = os.path.join(testFilePath,'portal.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
rootUrlSum = ObuyUrlSummary(url=r'http://sz.icson.com/portal.html', name='Icson')
firstPage = IcsonAllSortParser(content, rootUrlSum)
for sort_3 in firstPage.parserSubUrlSums():
print sort_3.name,sort_3.url ,sort_3.catagoryLevel
def testSort3Page():
fileName = os.path.join(testFilePath,'icson_2011-08-27_14-13-21.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
sort_3_urlsum = ObuyUrlSummary(url=r'http://list.icson.com/311--------.html',
parentPath=[('test')], catagoryLevel=3)
sort3Page = IcsonSort3PageParser(content, sort_3_urlsum)
for sort_4 in sort3Page.getSort4PageUrlSums():
print sort_4.url
def testSort3Details():
fileName = os.path.join(testFilePath,'icson_2011-08-27_14-13-21.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
sort_3_urlsum = ObuyUrlSummary(url=r'http://list.icson.com/311--------.html',
parentPath=[('test')], catagoryLevel=3)
result = crawleRetries(sort_3_urlsum)
fileName = os.path.join(testFilePath,'icson_test.html')
with open(fileName, 'w') as fInput:
fInput.write(result.content)
sort3Page = IcsonSort3PageParser(result.content, sort_3_urlsum)
for product in sort3Page.parserPageInfos():
print product.logstr()
if __name__ == '__main__':
#testIcsonAllSortPage()
#testSort3Page()
testSort3Details()
| [
[
8,
0,
0.0414,
0.0414,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0769,
0.0059,
0,
0.66,
0.0625,
488,
0,
1,
0,
0,
488,
0,
0
],
[
1,
0,
0.0828,
0.0059,
0,
0.66... | [
"'''\nCreated on 2011-8-27\n\n主要用于从网站上爬取信息后,抽取页面信息;\n\n@author: zhongfeng\n'''",
"from pageparser import *",
"from spiderconfigparser import SpiderConfig",
"from crawlerhttp import crawleRetries",
"icsonRoot = ObuyUrlSummary(url=r'http://www.icson.com/portal.html', name='icson', \n isRecursed=True, c... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-25
@author: zhongfeng
'''
from icson.icsonpageparser import parserDict,icsonRoot
from spider import ObuySpider
from crawlerhttp import CrawlResult
import os,sys
from logfacade import LoggerFactory
from upload import fileUpload
from spiderconfigparser import SpiderConfig
def main(fileName):
with open(fileName, 'r') as fInput:
content = fInput.read()
rootResult = CrawlResult(code=200, content=content)
from spiderconfigparser import getIncludeUrlSums,getExcludeUrlSums
includes = getIncludeUrlSums()
excludes = getExcludeUrlSums()
spider = ObuySpider(rootUrlSummary=icsonRoot, parserDict=parserDict,include=includes,exclude = excludes,
rootPageResult=rootResult, threadNum=SpiderConfig.getThreadNum())
if SpiderConfig.isStartSpider():
spider.spide()
LoggerFactory.shutdown()
if SpiderConfig.isUpload():
fileUpload()
if __name__ == '__main__':
curPath = os.path.abspath(os.path.dirname(sys.argv[0]))
fileName = os.path.join(curPath, 'portal.html')
main(fileName)
| [
[
8,
0,
0.1757,
0.1622,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2973,
0.027,
0,
0.66,
0.1111,
965,
0,
2,
0,
0,
965,
0,
0
],
[
1,
0,
0.3243,
0.027,
0,
0.66,
... | [
"'''\nCreated on 2011-8-25\n\n@author: zhongfeng\n\n'''",
"from icson.icsonpageparser import parserDict,icsonRoot",
"from spider import ObuySpider",
"from crawlerhttp import CrawlResult",
"import os,sys",
"from logfacade import LoggerFactory",
"from upload import fileUpload",
"from spiderconfigparser ... |
#/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import time
import random
from BeautifulSoup import BeautifulSoup
from crawlerhttp import UrlSummary, crawle
urlsProxy = ["http://proxy.ipcn.org/proxylist.html"]
#urlsProxy = ["http://www.proxycn.com/html_proxy/http-1.html"]
desSite = 'http://www.360buy.com'
class ChoiceProxy(object):
proxyList = []
def __init__(self):
pass
def __new__(cls):
if '_inst' not in vars(cls):
cls.__initProxyList()
cls._inst = super(ChoiceProxy, cls).__new__(cls)
return cls._inst
@classmethod
def __initProxyList(cls):
ipcnProxyPageResult = crawle(urlsProxy[0])
if ipcnProxyPageResult.code == 200:
#soup = BeautifulSoup(ipcnProxyPageResult.content)
#proxyContents = soup.find('pre').contents[0]
p = re.compile(r'(\d+\.\d+\.\d+\.\d+:[0-9]+)')
for proxyIp in p.findall(ipcnProxyPageResult.content):
if(cls.__testProxy(proxyIp)):
print proxyIp
cls.proxyList.append(proxyIp)
@classmethod
def __testProxy(cls, proxy):
proxyDicts = {'http':proxy}
start = time.time()
result = crawle(desSite, proxy = proxyDicts)
end = time.time()
estime = end - start
print proxy, estime
if result.code != 200 or estime > 10:
return False
return True
@staticmethod
def choice():
if len(ChoiceProxy.proxyList) == 0:
return None
return random.choice(ChoiceProxy.proxyList)
def choiceHttpProxy():
return {'http':ChoiceProxy.choice()}
if __name__ == '__main__':
for i in range(10):
print ChoiceProxy().choice()
| [
[
1,
0,
0.0656,
0.0164,
0,
0.66,
0,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.082,
0.0164,
0,
0.66,
0.1111,
654,
0,
1,
0,
0,
654,
0,
0
],
[
1,
0,
0.0984,
0.0164,
0,
0... | [
"import re",
"import time",
"import random",
"from BeautifulSoup import BeautifulSoup",
"from crawlerhttp import UrlSummary, crawle",
"urlsProxy = [\"http://proxy.ipcn.org/proxylist.html\"]",
"desSite = 'http://www.360buy.com'",
"class ChoiceProxy(object):\n proxyList = []\n def __init__(self):\... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-16
@author: zhongfeng
'''
SUNING_FEATURES_MAP = {
'''
__
__
__
__
__
__
__
__
__
__
__
##
##
'''
:
'.',
'''
_######_
########
########
###__###
###__###
##____##
##____##
##____##
###__###
###__###
########
########
_######_ '''
:
'0',
'''
___##
__###
#####
#####
##_##
___##
___##
___##
___##
___##
___##
___##
___##
'''
:
'1',
'''
_######_
########
########
###__###
_____###
_____###
____####
___####_
__####__
_####___
####____
########
########
'''
:
'2',
'''
_#####__
#######_
###_###_
_____##_
____###_
___####_
___#####
_____###
_____###
###__###
########
########
_######_
'''
:
'3',
'''
____###__
____###__
___####__
__#####__
__#####__
_###_##__
####_##__
###__##__
#########
#########
_____##__
_____##__
_____##__
'''
:
'4',
'''
#######_
#######_
###_____
###_____
#######_
########
###_####
_____###
______##
###__###
########
########
_######_
'''
:
'5',
'''
_######_
_#######
########
###_____
###_____
#######_
########
########
###__###
###__###
########
########
_######_
'''
:
'6',
'''
########
########
_____###
____###_
____###_
___###__
___###__
___###__
__###___
__###___
__###___
__###___
__###___
'''
:
'7',
'''
_######_
########
########
##____##
###__###
########
########
########
###__###
##___###
########
########
_######_
'''
:
'8',
'''
_######_
########
########
###__###
###__###
########
########
_#######
_____###
_____###
###_####
#######_
_######_
'''
:
'9',
} | [
[
8,
0,
0.0284,
0.0237,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.5261,
0.9526,
0,
0.66,
1,
143,
0,
0,
0,
0,
0,
6,
0
]
] | [
"'''\nCreated on 2011-8-16\n\n@author: zhongfeng\n'''",
"SUNING_FEATURES_MAP = {\n '''\n__\n__\n__\n__\n__\n__"
] |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-7-26
国美价格图片识别模块
@author: zhongfeng
'''
import ImageFilter, ImageChops
from captcha_price import *
from suning.suning_feature import SUNING_FEATURES_MAP
import Image
import itertools
import re
import time
try:
import psyco
psyco.full()
except ImportError:
pass
class CaptchaProfile_suning(CaptchaProfile):
def __init__(self,features_map = SUNING_FEATURES_MAP):
super(CaptchaProfile_suning,self).__init__(features_map)
def __new__(cls,features_map = SUNING_FEATURES_MAP):
return super(CaptchaProfile_suning, cls).__new__(cls,features_map)
def filter(self,im_raw):
pixdata_raw = im_raw.load()
imge_size = im_raw.size
im = Image.new('1',imge_size)
xsize,ysize = imge_size
pixdata = im.load()
for x in xrange(xsize):
for y in xrange(ysize):
if pixdata_raw[x,y] == (255,255,255,255):
pixdata[x,y] = 255
else:
pixdata[x,y] = 0
return im
def split(self, im,top = 3,bottom = 16):
ddArr = self.splitAgorim(im, top, bottom)
return (im.crop(idt) for idt in ddArr)
def captcha_suning(filename):
return captcha(filename, CaptchaProfile_suning())
import os
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')
if __name__ == '__main__':
fileName = os.path.join(testFilePath, "789.png")
im_raw = Image.open(fileName)
pixdata_raw = im_raw.load()
#r,g,b,a = im.split()
im = im_raw.filter(ImageFilter.EDGE_ENHANCE_MORE).convert('L').convert('1')
im = Image.new('1',im_raw.size)
xsize,ysize = im.size
pixdata = im.load()
for x in xrange(xsize):
for y in xrange(ysize):
if pixdata_raw[x,y] == (255,255,255,255):
pixdata[x,y] = 255
else:
pixdata[x,y] = 0
print CaptchaImageAlgorithm.GetBinaryMap(im)
print captcha_suning(fileName)
# it1 = im.crop((3, 4, 13, 16))
# print cia.GetBinaryMap(it1),'\n'
# it2 = im.crop((15,4,24,16))
# print cia.GetBinaryMap(it2)
# print '+++++++++'
# it2 = im.crop((25, 4, 34, 16))
# it3 = im.crop ((36,4,45,16))
# #it3 = im.crop((35, 4, 37, 16))
# it4 = im.crop((38, 4, 47, 16))
# it5 = im.crop((48, 4, 57, 16))
# #it6 = im.crop((51, 3, 57, 11))
# #it7 = im.crop((59, 3, 65, 11))
# multilist = [[0 for col in range(5)] for row in range(3)]
# print '\n'.join(( str(t) for t in multilist))
#profile = CaptchaProfile_360Buy()
#print captcha_360buy(r'c:\6.png')
| [
[
8,
0,
0.0745,
0.0745,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1277,
0.0106,
0,
0.66,
0.0714,
739,
0,
2,
0,
0,
739,
0,
0
],
[
1,
0,
0.1383,
0.0106,
0,
0.66... | [
"'''\nCreated on 2011-7-26\n\n国美价格图片识别模块\n\n@author: zhongfeng\n'''",
"import ImageFilter, ImageChops",
"from captcha_price import *",
"from suning.suning_feature import SUNING_FEATURES_MAP",
"import Image",
"import itertools",
"import re",
"import time",
"try:\n import psyco\n psyco.full()\ne... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-7-26
国美价格图片识别模块
@author: zhongfeng
'''
import ImageFilter, ImageChops
from captcha_price import *
from suning.suning_feature import SUNING_FEATURES_MAP
import Image
import itertools
import re
import time
try:
import psyco
psyco.full()
except ImportError:
pass
class CaptchaProfile_suning(CaptchaProfile):
def __init__(self,features_map = SUNING_FEATURES_MAP):
super(CaptchaProfile_suning,self).__init__(features_map)
def __new__(cls,features_map = SUNING_FEATURES_MAP):
return super(CaptchaProfile_suning, cls).__new__(cls,features_map)
def filter(self,im_raw):
pixdata_raw = im_raw.load()
imge_size = im_raw.size
im = Image.new('1',imge_size)
xsize,ysize = imge_size
pixdata = im.load()
for x in xrange(xsize):
for y in xrange(ysize):
if pixdata_raw[x,y] == (255,255,255,255):
pixdata[x,y] = 255
else:
pixdata[x,y] = 0
return im
def split(self, im,top = 3,bottom = 16):
ddArr = self.splitAgorim(im, top, bottom)
return (im.crop(idt) for idt in ddArr)
def captcha_suning(filename):
return captcha(filename, CaptchaProfile_suning())
import os
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')
if __name__ == '__main__':
fileName = os.path.join(testFilePath, "789.png")
im_raw = Image.open(fileName)
pixdata_raw = im_raw.load()
#r,g,b,a = im.split()
im = im_raw.filter(ImageFilter.EDGE_ENHANCE_MORE).convert('L').convert('1')
im = Image.new('1',im_raw.size)
xsize,ysize = im.size
pixdata = im.load()
for x in xrange(xsize):
for y in xrange(ysize):
if pixdata_raw[x,y] == (255,255,255,255):
pixdata[x,y] = 255
else:
pixdata[x,y] = 0
print CaptchaImageAlgorithm.GetBinaryMap(im)
print captcha_suning(fileName)
# it1 = im.crop((3, 4, 13, 16))
# print cia.GetBinaryMap(it1),'\n'
# it2 = im.crop((15,4,24,16))
# print cia.GetBinaryMap(it2)
# print '+++++++++'
# it2 = im.crop((25, 4, 34, 16))
# it3 = im.crop ((36,4,45,16))
# #it3 = im.crop((35, 4, 37, 16))
# it4 = im.crop((38, 4, 47, 16))
# it5 = im.crop((48, 4, 57, 16))
# #it6 = im.crop((51, 3, 57, 11))
# #it7 = im.crop((59, 3, 65, 11))
# multilist = [[0 for col in range(5)] for row in range(3)]
# print '\n'.join(( str(t) for t in multilist))
#profile = CaptchaProfile_360Buy()
#print captcha_360buy(r'c:\6.png')
| [
[
8,
0,
0.0745,
0.0745,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1277,
0.0106,
0,
0.66,
0.0714,
739,
0,
2,
0,
0,
739,
0,
0
],
[
1,
0,
0.1383,
0.0106,
0,
0.66... | [
"'''\nCreated on 2011-7-26\n\n国美价格图片识别模块\n\n@author: zhongfeng\n'''",
"import ImageFilter, ImageChops",
"from captcha_price import *",
"from suning.suning_feature import SUNING_FEATURES_MAP",
"import Image",
"import itertools",
"import re",
"import time",
"try:\n import psyco\n psyco.full()\ne... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-16
@author: zhongfeng
'''
SUNING_FEATURES_MAP = {
'''
__
__
__
__
__
__
__
__
__
__
__
##
##
'''
:
'.',
'''
_######_
########
########
###__###
###__###
##____##
##____##
##____##
###__###
###__###
########
########
_######_ '''
:
'0',
'''
___##
__###
#####
#####
##_##
___##
___##
___##
___##
___##
___##
___##
___##
'''
:
'1',
'''
_######_
########
########
###__###
_____###
_____###
____####
___####_
__####__
_####___
####____
########
########
'''
:
'2',
'''
_#####__
#######_
###_###_
_____##_
____###_
___####_
___#####
_____###
_____###
###__###
########
########
_######_
'''
:
'3',
'''
____###__
____###__
___####__
__#####__
__#####__
_###_##__
####_##__
###__##__
#########
#########
_____##__
_____##__
_____##__
'''
:
'4',
'''
#######_
#######_
###_____
###_____
#######_
########
###_####
_____###
______##
###__###
########
########
_######_
'''
:
'5',
'''
_######_
_#######
########
###_____
###_____
#######_
########
########
###__###
###__###
########
########
_######_
'''
:
'6',
'''
########
########
_____###
____###_
____###_
___###__
___###__
___###__
__###___
__###___
__###___
__###___
__###___
'''
:
'7',
'''
_######_
########
########
##____##
###__###
########
########
########
###__###
##___###
########
########
_######_
'''
:
'8',
'''
_######_
########
########
###__###
###__###
########
########
_#######
_____###
_____###
###_####
#######_
_######_
'''
:
'9',
} | [
[
8,
0,
0.0284,
0.0237,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.5261,
0.9526,
0,
0.66,
1,
143,
0,
0,
0,
0,
0,
6,
0
]
] | [
"'''\nCreated on 2011-8-16\n\n@author: zhongfeng\n'''",
"SUNING_FEATURES_MAP = {\n '''\n__\n__\n__\n__\n__\n__"
] |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from suning.suningparser import parserDict,sunningRoot
from spider import main
if __name__ == '__main__':
main(sunningRoot,parserDict) | [
[
8,
0,
0.4333,
0.4,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.7333,
0.0667,
0,
0.66,
0.3333,
150,
0,
2,
0,
0,
150,
0,
0
],
[
1,
0,
0.8,
0.0667,
0,
0.66,
... | [
"'''\nCreated on 2011-8-2\n\n@author: zhongfeng\n\n'''",
"from suning.suningparser import parserDict,sunningRoot",
"from spider import main",
"if __name__ == '__main__':\n main(sunningRoot,parserDict)",
" main(sunningRoot,parserDict)"
] |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from suning.suningparser import parserDict,sunningRoot
from spider import main
if __name__ == '__main__':
main(sunningRoot,parserDict) | [
[
8,
0,
0.4333,
0.4,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.7333,
0.0667,
0,
0.66,
0.3333,
150,
0,
2,
0,
0,
150,
0,
0
],
[
1,
0,
0.8,
0.0667,
0,
0.66,
... | [
"'''\nCreated on 2011-8-2\n\n@author: zhongfeng\n\n'''",
"from suning.suningparser import parserDict,sunningRoot",
"from spider import main",
"if __name__ == '__main__':\n main(sunningRoot,parserDict)",
" main(sunningRoot,parserDict)"
] |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from efeihu.efeihupageparser import parserDict,efeihuRoot
from spider import main
if __name__ == '__main__':
main(efeihuRoot,parserDict) | [
[
8,
0,
0.4062,
0.375,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.6875,
0.0625,
0,
0.66,
0.3333,
263,
0,
2,
0,
0,
263,
0,
0
],
[
1,
0,
0.8125,
0.0625,
0,
0.66,... | [
"'''\nCreated on 2011-8-2\n\n@author: zhongfeng\n\n'''",
"from efeihu.efeihupageparser import parserDict,efeihuRoot",
"from spider import main",
"if __name__ == '__main__':\n main(efeihuRoot,parserDict)",
" main(efeihuRoot,parserDict)"
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-11-22
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from pageparser import *
import urlparse
from spiderconfigparser import SpiderConfig
efeihuRoot = ObuyUrlSummary(url=r'http://www.efeihu.com/', name='efeihu')
class EfeihuAllSortParser(RootCatagoryPageParser):
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(EfeihuAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def getBaseSort3UrlSums(self):
finalUrlList = []
allSort = self.soup.find(attrs={'id':'sitesort'})
for t in allSort.findAll(name='div',attrs={'id':re.compile(r'sort_hd_[0-9]*')}):#一级分类
name = t.h3.a.contents[0]
url = t.h3.a['href']
sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=False)
sort_2 = t.find(attrs={'class':'subitem'})
for tt in sort_2(name='dl'):#二级分类
name, url = ParserUtils.parserTag_A(tt.dt.a)
sort_2_urlsum = self.buildSort_N(url, name, sort_1_urlsum, isCrawle=False)
for ttt in tt.dd(name='em'):#三级分类
name, url = ParserUtils.parserTag_A(ttt.a)
sort_3_urlsum = self.buildSort_N(url, name, sort_2_urlsum,firstFinalPage=True)
finalUrlList.append(sort_3_urlsum)
return finalUrlList
class EfeihuSort3PageParser(Sort3PageParser):
'''
三级页面解析类
'''
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(EfeihuSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def nextPageUrlPattern(self):
return self.rootUrlSummary.url.replace('--1','--{}')
def getTotal(self):
s = self.soup.find(name='div',attrs = {'id':'ctl00_ContentPlaceHolder1_ucProductItemWithPager1_AspNetPager_down'})
if s is None:
pageNum = 1
else:
a = s(name = 'a',attrs={'class':'btn_next'})[-1]
name,url = ParserUtils.parserTag_A(a)
pageNum = url.split('/')[-1].split('.')[0].split('-')[-1]
pageNum = int(pageNum)
if pageNum > SpiderConfig.getMaxPage():
pageNum = SpiderConfig.getMaxPage()
return pageNum
def __getSingleProdDetail(self, prod):
infoSeg = prod.find(attrs={'class':'infor'})
pNameHref = infoSeg.find(name='a',attrs={'class':'name'})
pName, url = ParserUtils.parserTag_A(pNameHref)
url = ''.join(('http://www.efeihu.com',url))
pid = url.split('/')[-1].split('.')[0]
adwords = infoSeg.find(name='p',attrs={'class':'promtn'}).getText()
t = infoSeg.find(name='span', attrs={'class':'price_e'})
if t != None:
currentPrice = ParserUtils.getPrice(t.getText())
else:
currentPrice = 0.00
t = infoSeg.find(name='span',attrs={'class':'price_del'})
if t != None:
pastPrice = ParserUtils.getPrice(t.getText())
else:
pastPrice = 0.00
evalNum = ParserUtils.getDigit(infoSeg.find(name='div',attrs={'class':'comment'}).a.getText())
imgUrlSeg = prod.find(name='a', attrs={'class':'img'})
imgUrl = ParserUtils.getImgUrl(imgUrlSeg)
prodDetail = ProductDetails(productId=pid, fullUrl=url, imageUrl=imgUrl, privPrice=currentPrice,
pubPrice=pastPrice, name=pName, adWords=adwords,evaluateNum=evalNum)
prodDetail.catagory = self.rootUrlSummary
return prodDetail
def parserPageInfos(self):
plist = self.soup.find(name='ul',attrs={'id':'prolist'})
resultList = []
for prod in plist(name='li',attrs={'class':'m_pro'}):
prodDetail = self.__getSingleProdDetail(prod)
resultList.append(prodDetail)
return resultList
class EfeihuSort4PageParser(EfeihuSort3PageParser):
'''
分类四级页面为列表页面,只抽取Product信息
'''
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(EfeihuSort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def parserSubUrlSums(self):
pass
parserDict = {0:EfeihuAllSortParser, 3:EfeihuSort3PageParser, 4:EfeihuSort4PageParser}
''' test '''
import os,chardet
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')
from crawlerhttp import getContentFromUrlSum
def testAllSortPage():
fileName = os.path.join(testFilePath,'efeihu.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
rootUrlSum = ObuyUrlSummary(url=r'http://www.efeihu.com/', name='efeihu')
firstPage = EfeihuAllSortParser(content, rootUrlSum)
for sort_3 in firstPage.parserSubUrlSums():
for index, urlsum in enumerate(sort_3.parentPath):
print urlsum.name
print sort_3.name,sort_3.url ,sort_3.catagoryLevel
def testSort3Page():
sort_3_urlsum = ObuyUrlSummary(url=r'http://www.efeihu.com/Products/89-0-0-0-0-0-40--1.html',
parentPath=[('test')], catagoryLevel=3)
content = getContentFromUrlSum(sort_3_urlsum)
sort3Page = EfeihuSort3PageParser(content, sort_3_urlsum)
for sort_4 in sort3Page.getSort4PageUrlSums():
print sort_4.url
for product in sort3Page.parserPageInfos():
print product.logstr()
if __name__ == '__main__':
testSort3Page()
| [
[
8,
0,
0.049,
0.049,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0839,
0.007,
0,
0.66,
0.0625,
488,
0,
1,
0,
0,
488,
0,
0
],
[
1,
0,
0.0909,
0.007,
0,
0.66,
... | [
"'''\nCreated on 2011-11-22\n\n主要用于从网站上爬取信息后,抽取页面信息;\n\n@author: zhongfeng\n'''",
"from pageparser import *",
"import urlparse",
"from spiderconfigparser import SpiderConfig",
"efeihuRoot = ObuyUrlSummary(url=r'http://www.efeihu.com/', name='efeihu')",
"class EfeihuAllSortParser(RootCatagoryPageParser): \... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from efeihu.efeihupageparser import parserDict,efeihuRoot
from spider import main
if __name__ == '__main__':
main(efeihuRoot,parserDict) | [
[
8,
0,
0.4062,
0.375,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.6875,
0.0625,
0,
0.66,
0.3333,
263,
0,
2,
0,
0,
263,
0,
0
],
[
1,
0,
0.8125,
0.0625,
0,
0.66,... | [
"'''\nCreated on 2011-8-2\n\n@author: zhongfeng\n\n'''",
"from efeihu.efeihupageparser import parserDict,efeihuRoot",
"from spider import main",
"if __name__ == '__main__':\n main(efeihuRoot,parserDict)",
" main(efeihuRoot,parserDict)"
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-11-22
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from pageparser import *
import urlparse
from spiderconfigparser import SpiderConfig
efeihuRoot = ObuyUrlSummary(url=r'http://www.efeihu.com/', name='efeihu')
class EfeihuAllSortParser(RootCatagoryPageParser):
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(EfeihuAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def getBaseSort3UrlSums(self):
finalUrlList = []
allSort = self.soup.find(attrs={'id':'sitesort'})
for t in allSort.findAll(name='div',attrs={'id':re.compile(r'sort_hd_[0-9]*')}):#一级分类
name = t.h3.a.contents[0]
url = t.h3.a['href']
sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=False)
sort_2 = t.find(attrs={'class':'subitem'})
for tt in sort_2(name='dl'):#二级分类
name, url = ParserUtils.parserTag_A(tt.dt.a)
sort_2_urlsum = self.buildSort_N(url, name, sort_1_urlsum, isCrawle=False)
for ttt in tt.dd(name='em'):#三级分类
name, url = ParserUtils.parserTag_A(ttt.a)
sort_3_urlsum = self.buildSort_N(url, name, sort_2_urlsum,firstFinalPage=True)
finalUrlList.append(sort_3_urlsum)
return finalUrlList
class EfeihuSort3PageParser(Sort3PageParser):
'''
三级页面解析类
'''
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(EfeihuSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def nextPageUrlPattern(self):
return self.rootUrlSummary.url.replace('--1','--{}')
def getTotal(self):
s = self.soup.find(name='div',attrs = {'id':'ctl00_ContentPlaceHolder1_ucProductItemWithPager1_AspNetPager_down'})
if s is None:
pageNum = 1
else:
a = s(name = 'a',attrs={'class':'btn_next'})[-1]
name,url = ParserUtils.parserTag_A(a)
pageNum = url.split('/')[-1].split('.')[0].split('-')[-1]
pageNum = int(pageNum)
if pageNum > SpiderConfig.getMaxPage():
pageNum = SpiderConfig.getMaxPage()
return pageNum
def __getSingleProdDetail(self, prod):
infoSeg = prod.find(attrs={'class':'infor'})
pNameHref = infoSeg.find(name='a',attrs={'class':'name'})
pName, url = ParserUtils.parserTag_A(pNameHref)
url = ''.join(('http://www.efeihu.com',url))
pid = url.split('/')[-1].split('.')[0]
adwords = infoSeg.find(name='p',attrs={'class':'promtn'}).getText()
t = infoSeg.find(name='span', attrs={'class':'price_e'})
if t != None:
currentPrice = ParserUtils.getPrice(t.getText())
else:
currentPrice = 0.00
t = infoSeg.find(name='span',attrs={'class':'price_del'})
if t != None:
pastPrice = ParserUtils.getPrice(t.getText())
else:
pastPrice = 0.00
evalNum = ParserUtils.getDigit(infoSeg.find(name='div',attrs={'class':'comment'}).a.getText())
imgUrlSeg = prod.find(name='a', attrs={'class':'img'})
imgUrl = ParserUtils.getImgUrl(imgUrlSeg)
prodDetail = ProductDetails(productId=pid, fullUrl=url, imageUrl=imgUrl, privPrice=currentPrice,
pubPrice=pastPrice, name=pName, adWords=adwords,evaluateNum=evalNum)
prodDetail.catagory = self.rootUrlSummary
return prodDetail
def parserPageInfos(self):
plist = self.soup.find(name='ul',attrs={'id':'prolist'})
resultList = []
for prod in plist(name='li',attrs={'class':'m_pro'}):
prodDetail = self.__getSingleProdDetail(prod)
resultList.append(prodDetail)
return resultList
class EfeihuSort4PageParser(EfeihuSort3PageParser):
'''
分类四级页面为列表页面,只抽取Product信息
'''
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(EfeihuSort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def parserSubUrlSums(self):
pass
parserDict = {0:EfeihuAllSortParser, 3:EfeihuSort3PageParser, 4:EfeihuSort4PageParser}
''' test '''
import os,chardet
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')
from crawlerhttp import getContentFromUrlSum
def testAllSortPage():
fileName = os.path.join(testFilePath,'efeihu.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
rootUrlSum = ObuyUrlSummary(url=r'http://www.efeihu.com/', name='efeihu')
firstPage = EfeihuAllSortParser(content, rootUrlSum)
for sort_3 in firstPage.parserSubUrlSums():
for index, urlsum in enumerate(sort_3.parentPath):
print urlsum.name
print sort_3.name,sort_3.url ,sort_3.catagoryLevel
def testSort3Page():
sort_3_urlsum = ObuyUrlSummary(url=r'http://www.efeihu.com/Products/89-0-0-0-0-0-40--1.html',
parentPath=[('test')], catagoryLevel=3)
content = getContentFromUrlSum(sort_3_urlsum)
sort3Page = EfeihuSort3PageParser(content, sort_3_urlsum)
for sort_4 in sort3Page.getSort4PageUrlSums():
print sort_4.url
for product in sort3Page.parserPageInfos():
print product.logstr()
if __name__ == '__main__':
testSort3Page()
| [
[
8,
0,
0.049,
0.049,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0839,
0.007,
0,
0.66,
0.0625,
488,
0,
1,
0,
0,
488,
0,
0
],
[
1,
0,
0.0909,
0.007,
0,
0.66,
... | [
"'''\nCreated on 2011-11-22\n\n主要用于从网站上爬取信息后,抽取页面信息;\n\n@author: zhongfeng\n'''",
"from pageparser import *",
"import urlparse",
"from spiderconfigparser import SpiderConfig",
"efeihuRoot = ObuyUrlSummary(url=r'http://www.efeihu.com/', name='efeihu')",
"class EfeihuAllSortParser(RootCatagoryPageParser): \... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-9-24
@author: zhongfeng
'''
import re,os,sys
import chardet
from pageparser import ObuyUrlSummary
from utils import Singleton
from ConfigParser import ConfigParser, NoOptionError
def __getUrlSumsFromSection(section):
curPath = os.path.abspath(os.path.dirname(sys.argv[0]))
fileName = os.path.join(curPath, 'urls.cfg')
print 'spider.cfg full path:%s' % fileName
urls = list()
if not os.path.exists(fileName):
return urls
regx = r'\[%s\]' % section
includeSecRegx = re.compile(regx)
otherSecRegx = re.compile(r'\[.*\]')
flag = False
with file(fileName) as inputFile:
for line in inputFile:
encoding = chardet.detect(line)['encoding']
line = line.decode(encoding,'ignore')
if (not flag) and includeSecRegx.match(line):
flag = True
elif flag:
if otherSecRegx.match(line):
break
if line.strip() != '':
line = ' '.join(line.split())
ret = line.split(',')
ret = [it.strip() for it in ret]
urlSumm = ObuyUrlSummary(name = ret[1],url = ret[0],catagoryLevel = int(ret[2]))
urls.append(urlSumm)
return urls
def getIncludeUrlSums():
return __getUrlSumsFromSection('include')
def getExcludeUrlSums():
return __getUrlSumsFromSection('exclude')
class SpiderConfig(Singleton):
@classmethod
def _init(cls):
curPath = os.path.abspath(os.path.dirname(sys.argv[0]))
fileName = os.path.join(curPath, 'spider.conf')
cls.cf = ConfigParser()
cls.cf.read(fileName)
@classmethod
def getConfig(cls,option):
if not hasattr(cls, 'cf'):
cls._init()
try:
return cls.cf.get('conf',option)
except Exception:
pass
@classmethod
def getMaxPage(cls):
ret = cls.getConfig('max_page')
if ret:
return int(ret)
return 50
@classmethod
def getThreadNum(cls):
threadNum = cls.getConfig('thread_num')
if threadNum:
return int(threadNum)
return 10
@classmethod
def getProxy(cls):
return cls.getConfig('ftp_proxy')
@classmethod
def isStartSpider(cls):
flag = int(cls.getConfig('is_spider'))
if flag is not None:
return flag
return True
@classmethod
def isUpload(cls):
flag = int(cls.getConfig('is_upload'))
if flag is not None:
return flag
return True
if __name__ == '__main__':
print SpiderConfig.getMaxPage()
print SpiderConfig.getThreadNum()
print SpiderConfig.getProxy()
| [
[
8,
0,
0.0577,
0.0481,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0962,
0.0096,
0,
0.66,
0.1,
540,
0,
3,
0,
0,
540,
0,
0
],
[
1,
0,
0.1058,
0.0096,
0,
0.66,
... | [
"'''\nCreated on 2011-9-24\n\n@author: zhongfeng\n'''",
"import re,os,sys",
"import chardet",
"from pageparser import ObuyUrlSummary",
"from utils import Singleton",
"from ConfigParser import ConfigParser, NoOptionError",
"def __getUrlSumsFromSection(section):\n curPath = os.path.abspath(os.path.dirn... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-7-26
图片识别模块
@author: zhongfeng
'''
import ImageFilter, ImageChops
import Image
import re,itertools
import time
try:
import psyco
psyco.full()
except ImportError:
pass
class CaptchaAlgorithm(object):
'''captcha algorithm'''
def LevenshteinDistance(self, m, n):
c = [[i] for i in range(0, len(m) + 1)]
c[0] = [j for j in range(0, len(n) + 1)]
for i in range(0, len(m)):
for j in range(0, len(n)):
c[i + 1].append(
min(
c[i][j + 1] + 1,
c[i + 1][j] + 1,
c[i][j] + (0 if m[i] == n[j] else 1)
)
)
return c[-1][-1]
class CaptchaImageAlgorithm(object):
'''captcha image algorithm'''
@staticmethod
def GetPixelsXEdges(im):
pixels = im.load()
xsize, ysize = im.size
state = -1
edges = []
for x in xrange(xsize):
weight = sum(1 if pixels[x, y] == 0 else 0 for y in xrange(ysize))
level = 0
if state == -1 and weight <= level:
continue
elif state == 1 and weight > level:
continue
else:
state = -state
edges.append(x)
return [(edges[x], edges[x + 1]) for x in range(0, len(edges), 2)]
@staticmethod
def GetPixelsYEdges(im):
pixels = im.load()
xsize, ysize = im.size
state = -1
edges = []
for y in xrange(ysize):
weight = sum(1 if pixels[x, y] == 0 else 0 for x in xrange(xsize))
level = 0
if state == -1 and weight <= level:
continue
elif state == 1 and weight > level:
continue
else:
state = -state
edges.append(y)
return [(edges[x], edges[x + 1]) for x in range(0, len(edges), 2)]
@staticmethod
def StripYEdge(im):
yedges = CaptchaImageAlgorithm.GetPixelsYEdges(im)
y1, y2 = yedges[0][0], yedges[-1][1]
return im.crop((0, y1, im.size[0], y2))
@staticmethod
def GetBinaryMap(im):
xsize, ysize = im.size
pixels = im.load()
return '\n'.join(''.join('#' if pixels[x, y] == 0 else '_' for x in xrange(xsize)) for y in xrange(ysize))
@staticmethod
def getBitMapIn(im):
xsize, ysize = im.size
pixels = im.load()
return tuple( 0 if pixels[x, y] == 0 else 255 for x in xrange(xsize) for y in xrange(ysize))
class CaptchaProfile(object):
def __init__(self,features_map):
self.features_map = features_map
def __new__(cls,features_map):
'''
单态实现,初始化一次
'''
if '_inst' not in vars(cls):
cls.catagory_FEATURES_MAP__ = dict([(feature_to_data(key),value) for key,value in features_map.iteritems()])
cls._inst = super(CaptchaProfile, cls).__new__(cls)
return cls._inst
def match(self, im):
#st = time.time()
imageData = feature_to_data(CaptchaImageAlgorithm.GetBinaryMap(im))
result = self.catagory_FEATURES_MAP__.get(imageData,None)
if result != None:
return result
print CaptchaImageAlgorithm.GetBinaryMap(im),'\n'
source = im.getdata()
algorithm = CaptchaAlgorithm()
minimal = min(self.features_map, key=lambda feature:algorithm.LevenshteinDistance(source, feature_to_data(feature)))
result = self.features_map[minimal]
self.catagory_FEATURES_MAP__[imageData] = result
return result
def filter(self, im):
return im.filter(ImageFilter.EDGE_ENHANCE_MORE).convert('L').convert('1')
def splitAgorim(self, im, top,bottom):
xsize, ysize = im.size
pixels = im.load()
zeroArr = []
for x in xrange(xsize):
flag = True
for y in xrange(ysize):
if pixels[x,y] != 255:
flag = False
break
if flag or x == 0:
zeroArr.append(x)
zeroArr = [(value - index ,value) for index,value in enumerate(zeroArr)]
retd = []
for key, group in itertools.groupby(zeroArr, lambda x: x[0]):
ret = [t[1] for t in group]
retd.append((ret[0],ret[-1]))
l = len(retd)
i = 0
dd = []
while i < l - 1 :
pre = retd[i][1] + 1
next = retd[i + 1][0]
# if 2 < next - pre < 7:
# nPre = retd[i + 1][1]
# nNext = retd[i + 2][0]
# if 2 < nNext - nPre < 7:
# dd.append((pre,4,nNext,16))
# i = i + 2
# continue
# print (pre,4,next,16)
dd.append((pre,top,next,bottom))
i = i + 1
return dd
def split(self, im, top,bottom):
ddArr = self.splitAgorim(im, top, bottom)
return (im.crop(idt) for idt in ddArr[1:])
def feature_to_data(feature):
feature = re.sub(r'[\t\s]', '', feature)
feature = re.sub(r'[\r\n]', '', feature)
return tuple(0 if x == '#' else 255 for x in feature)
def captcha(filename, profile):
#s = time.time()
im = Image.open(filename)
#s2 = time.time()
#print 'open',s2-s
im = profile.filter(im)
#s3 = time.time()
#print 'filter',s3 - s2
im_list = profile.split(im)
#s4 = time.time()
#print 'split',s4 - s3
result = ''.join(profile.match(im) for im in im_list)
#print 'match',time.time() - s4
return result
| [
[
8,
0,
0.0374,
0.0374,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0642,
0.0053,
0,
0.66,
0.1,
739,
0,
2,
0,
0,
739,
0,
0
],
[
1,
0,
0.0695,
0.0053,
0,
0.66,
... | [
"'''\nCreated on 2011-7-26\n\n图片识别模块\n\n@author: zhongfeng\n'''",
"import ImageFilter, ImageChops",
"import Image",
"import re,itertools",
"import time",
"try:\n import psyco\n psyco.full()\nexcept ImportError:\n pass",
" import psyco",
" psyco.full()",
"class CaptchaAlgorithm(object)... |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-7-27
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from BeautifulSoup import BeautifulSoup
from crawlerhttp import UrlSummary, CrawlerType, crawleRetries
from time import strftime
import chardet, re
from urlparse import urlparse
from threadpool import WorkRequest
from crawlerhttp import crawle
from cStringIO import StringIO
from itertools import chain
encodingDict = {'360buy':'gb2312', 'newegg':'gb2312', 'dangdang':'gb2312', 'gome':'utf-8',
'amazon':'utf-8', 'coo8':'gb2312', 'suning':'utf-8','egou':'GBK',}#'efeihu':'utf-8'}
def reinqueue_proc(req, result):
urlsum = req[0]
pool = req[3]
if urlsum.stat == 0:
urlsum.stat = result.code
req = WorkRequest(getProductPrice, req, None,
callback=None)
pool.putRequest(req)
else:
print "Failed %s:%d" % (urlsum.url, result.code)
def getProductPrice(*req):
pimgUrlSumm = req[0]
result = crawleRetries(pimgUrlSumm)
proc_normal_result(req, result)
return result
def proc_normal_result(req, result):
args = req
captcha = req[4]
if result.code == 200:
prodDetail = args[1]
resultList = args[2]
prodDetail.privPrice = captcha(StringIO(result.content))
resultList.append(prodDetail)
else:
reinqueue_proc(req, result)
class ObuyUrlSummary(UrlSummary):
'''
链接抽象类
'''
def __init__(self, url='', data=None, headers=None, crawlerType=CrawlerType.GET_URL, name='',
isCrawle=True, isRecursed=True, catagoryLevel=0, retries = 4, parentPath=None,parent = None,
stat=0, errReason='', include=None, exclude=None):
super(ObuyUrlSummary, self).__init__(url, data, headers, crawlerType,retries)
self.name = name #分类名称
self.catagoryLevel = catagoryLevel #分类级别
self.parentPath = [] if parentPath is None else parentPath #路径
self.parent = parent
self.isCrawle = isCrawle #是否抓取
self.isRecursed = isRecursed #是否递归抓取
self.stat = stat #抓取的最终状态
self.errReason = errReason #错误原因
self.include = None #subUrl中应该包含的url列表
self.exclude = None #subUrl中剔除的url列表,如果include,exclude同时设置,则include规则优先
def getUrlSumAbstract(self):
return self.name, self.url, self.catagoryLevel
def __str__(self):
return str(vars(self))
__repr__ = __str__
class ParserResult(object):
def logstr(self):
pass
def convertToUnicode(dataStr, siteName):
if isinstance(dataStr, str):
encoding = encodingDict.get(siteName, None)
if encoding is None:
encoding = chardet.detect(dataStr)['encoding']
encodingDict[siteName] = encoding
dataStr = dataStr.decode(encoding, 'ignore')
return dataStr
class Parser(object):
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
self.rootUrlSummary = rootUrlSummary
self.include = include
self.exclude = exclude
siteName = urlparse(rootUrlSummary.url).hostname.split('.')[1]
self.dataStr = convertToUnicode(dataStr, siteName)
self.soup = BeautifulSoup(self.dataStr, convertEntities=BeautifulSoup.HTML_ENTITIES) #默认使用BeautifulSoup做解析器
@staticmethod
def compareUrlSumm(urla, urlb):
if urla.url != None and len(urla.url) > 0:
return urla.url == urlb.url
elif urla.name != None and len(urla.name) > 0:
return urla.name == urlb.name
else:
return False
@staticmethod
def urlSummContain(filterArr, finalUrlSum):
#print finalUrlSum.name,finalUrlSum.url
for urlsumm in filterArr:
#print urlsumm.name,urlsumm.url
if Parser.compareUrlSumm(urlsumm, finalUrlSum):
return True
else:
for parent in finalUrlSum.parentPath:
#print parent.name,parent.url
if Parser.compareUrlSumm(urlsumm, parent):
return True
return False
def filterUrlList(self, finalUrlList):
filterResult = finalUrlList
if self.include != None and len(self.include) > 0:
filterResult = [ finalUrlSum for finalUrlSum in finalUrlList
if Parser.urlSummContain(self.include, finalUrlSum)]
elif self.exclude != None and len(self.exclude) > 0:
filterResult = [ finalUrlSum for finalUrlSum in finalUrlList
if not Parser.urlSummContain(self.exclude, finalUrlSum)]
return filterResult
def parserPageInfos(self):
'''
返回ParserResult组成的list
'''
pass
def parserSubUrlSums(self):
pass
def getParser(level,parserDict):
return parserDict.get(level,None)
class ParserUtils(object):
'''
html标签解析类,return (name,url)
'''
@staticmethod
def parserTag_A(a):
return a.getText().strip(), a['href'].strip()
@staticmethod
def getPrice(sPrice):
if not sPrice:
return '0.00'
'''¥4899.00变为4899.00'''
sPrice = sPrice.replace(u',', '')
regx = u'[0-9]+.[0-9]+'
p = re.compile(regx)
ret = p.search(sPrice)
if ret is None:
return '0.00'
return ret.group()
@staticmethod
def getDigit(s):
s = s.replace(u',', '')
regx = u'[0-9]+.[0-9]+|[0-9]+'
p = re.compile(regx)
sd = p.search(s)
if sd is None:
return 0
return sd.group()
@staticmethod
def getImgUrl(imgTag):
if imgTag is None:
return ''
return imgTag.img['src']
class RootCatagoryPageParser(Parser):
'''
根站点分类解析父类,获取所有的三级分类的ObuyUrlSummary
'''
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(RootCatagoryPageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def buildSort_N(self, url, name, parent, isCrawle=True,firstFinalPage = False):
'''
构造各级节点逻辑
'''
sort_n_urlsum = ObuyUrlSummary(url=url, name=name, isCrawle=isCrawle)
sort_n_urlsum.parentPath = []
sort_n_urlsum.catagoryLevel = parent.catagoryLevel + 1
sort_n_urlsum.parentPath.extend(parent.parentPath)
sort_n_urlsum.parentPath.append(parent)
if firstFinalPage:
sort_n_urlsum.parent = sort_n_urlsum
else:
sort_n_urlsum.parent = parent
return sort_n_urlsum
def getBaseSort3UrlSums(self):
pass
def parserSubUrlSums(self):
result = self.getBaseSort3UrlSums()
return self.filterUrlList(result)
class Sort3PageParser(Parser):
'''
三级页面解析类,
a.负责获取当前分类的所有的后续页面的UrlSummary
b.负责获取页面的所有商品的信息
'''
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(Sort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def buildSort_4(self, url):
sort4_urlsum = ObuyUrlSummary(url=url, name=self.rootUrlSummary.name,
catagoryLevel=4)
sort4_urlsum.parentPath = []
sort4_urlsum.parentPath.extend(self.rootUrlSummary.parentPath)
sort4_urlsum.parentPath.append(self.rootUrlSummary)
sort4_urlsum.parent = self.rootUrlSummary.parent
return sort4_urlsum
def getTotal(self):
pass
def nextPageUrlPattern(self):
pass
def buildSort_4UrlSums(self):
finalUrlList = []
totalPage = self.getTotal()
if totalPage > 1:
for pageNum in range(2, totalPage + 1):
url = self.nextPageUrlPattern().format(str(pageNum))
finalUrlList.append(self.buildSort_4(url))
return finalUrlList
def getSort4PageUrlSums(self):
return self.buildSort_4UrlSums()
def parserSubUrlSums(self):
result = self.getSort4PageUrlSums()
return self.filterUrlList(result)
def seEncode(ustr,encoding='gb18030'):
if ustr is None:
return ''
if isinstance(ustr,unicode):
return ustr.encode(encoding,'ignore')
else:
return str(ustr)
class ProductDetails(ParserResult):
'''
商品详细信息
'''
def __init__(self, name='', imageUrl='', productId='', catagory=None, fullUrl='', pubPrice='0.00',
privPrice='0.00', adWords='', reputation='0', evaluateNum='0', updateTime=None):
self.name = name #商品名称
self.imageUrl = imageUrl #商品图片URL
self.productId = productId #商品在原网站的ID
self.catagory = catagory #商品所属分类
self.fullUrl = fullUrl #原始链接
self.pubPrice = pubPrice #商品标称的原价
self.privPrice = privPrice #商家卖价,没扣除广告折扣价格
self.adWords = adWords #促销信息,包括下单立减、返劵等
self.reputation = reputation #好评度
self.evaluateNum = evaluateNum #评论数
self.updateTime = strftime("%Y-%m-%d %H:%M:%S") if updateTime is None else updateTime #更新时间
def __getCatagoryAbs(self):
cat = self.catagory.parent
if isinstance(cat, ObuyUrlSummary):
return str((seEncode(cat.url), cat.catagoryLevel))
else:
return ''
#return ','.join([str((seEncode(cat.url), cat.catagoryLevel)) for cat in chain(self.catagory.parentPath, (self.catagory,))])
def __filterStr(self,s):
return ' '.join(seEncode(s).replace('|', ' ').split())
def logstr(self):
return '|'.join(map(self.__filterStr, (self.productId, self.privPrice, self.updateTime, self.name, self.evaluateNum, self.reputation,
self.adWords,self.fullUrl, self.imageUrl, self.__getCatagoryAbs())))
def __str__(self):
return str(vars(self))
__repr__ = __str__
| [
[
8,
0,
0.0236,
0.0236,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0404,
0.0034,
0,
0.66,
0.0435,
878,
0,
1,
0,
0,
878,
0,
0
],
[
1,
0,
0.0438,
0.0034,
0,
0.66... | [
"'''\nCreated on 2011-7-27\n\n主要用于从网站上爬取信息后,抽取页面信息;\n\n@author: zhongfeng\n'''",
"from BeautifulSoup import BeautifulSoup",
"from crawlerhttp import UrlSummary, CrawlerType, crawleRetries",
"from time import strftime",
"import chardet, re",
"from urlparse import urlparse",
"from threadpool import WorkRe... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-16
@author: zhongfeng
'''
GOME_FEATURES_MAP = {
'''
__
__
__
__
__
__
__
__
##
##
'''
:
'.',
'''
______
_####_
##__##
##__##
##__##
##__##
##__##
##__##
##__##
_####_ '''
:
'0',
'''
__###__
_##_##_
##___##
##___##
##___##
##___##
##___##
##___##
_##_##_
__###__ '''
:
'0',
'''
__##__
####__
__##__
__##__
__##__
__##__
__##__
__##__
__##__
######
'''
:
'1',
'''
_####_
#___##
____##
____##
____##
___##_
__##__
_##___
##____
######
'''
:
'2',
'''
_#####_
#____##
_____##
____##_
_####__
____##_
_____##
_____##
#___##_
_####__
'''
:
'3',
'''
____##_
___###_
___###_
__#_##_
_#__##_
#___##_
#######
____##_
____##_
____##_
'''
:
'4',
'''
#######
#______
#______
#####__
____##_
_____##
_____##
_____##
#___##_
_####__
'''
:
'5',
'''
__####_
_##___#
_#_____
##_____
##_###_
###__##
##___##
##___##
_##__##
__####_
'''
:
'6',
'''
#######
_____##
____##_
____##_
___##__
___##__
__##___
__##___
_##____
_##____
'''
:
'7',
'''
_#####_
##___##
##___##
###_##_
_####__
_#####_
##__###
##___##
##___##
_#####_
'''
:
'8',
'''
_####__
##__##_
##___##
##___##
##__###
_###_##
_____##
_____##
#___##_
_####__
'''
:
'9',
} | [
[
8,
0,
0.0317,
0.0265,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.5291,
0.9471,
0,
0.66,
1,
764,
0,
0,
0,
0,
0,
6,
0
]
] | [
"'''\nCreated on 2011-8-16\n\n@author: zhongfeng\n'''",
"GOME_FEATURES_MAP = {\n '''\n__\n__\n__\n__\n__\n__"
] |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-7-26
国美价格图片识别模块
@author: zhongfeng
'''
import ImageFilter, ImageChops
from captcha_price import *
from gome.gome_feature import GOME_FEATURES_MAP
import Image
import itertools
import re
import time
try:
import psyco
psyco.full()
except ImportError:
pass
class CaptchaProfile_gome(CaptchaProfile):
def __init__(self,features_map = GOME_FEATURES_MAP):
super(CaptchaProfile_gome,self).__init__(features_map)
def __new__(cls,features_map = GOME_FEATURES_MAP):
return super(CaptchaProfile_gome, cls).__new__(cls,features_map)
def filter(self,im_raw):
pixdata_raw = im_raw.load()
imge_size = im_raw.size
im = Image.new('1',imge_size)
xsize,ysize = imge_size
pixdata = im.load()
for x in xrange(xsize):
for y in xrange(ysize):
if pixdata_raw[x,y] == (0,0,0,0):
pixdata[x,y] = 255
else:
pixdata[x,y] = 0
return im
def split(self, im,top = 6,bottom = 16):
return super(CaptchaProfile_gome,self).split(im,top,bottom)
def captcha_gome(filename):
return captcha(filename, CaptchaProfile_gome())
import os
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')
if __name__ == '__main__':
fileName = os.path.join(testFilePath, "0.png")
print captcha_gome(fileName)
im_raw = Image.open(fileName)
pixdata_raw = im_raw.load()
#r,g,b,a = im.split()
im = im_raw.filter(ImageFilter.EDGE_ENHANCE_MORE).convert('L').convert('1')
im = Image.new('1',im_raw.size)
xsize,ysize = im.size
pixdata = im.load()
for x in xrange(xsize):
for y in xrange(ysize):
if pixdata_raw[x,y] == (0,0,0,0):
pixdata[x,y] = 255
else:
pixdata[x,y] = 0
print CaptchaImageAlgorithm.GetBinaryMap(im)
# it1 = im.crop((3, 4, 13, 16))
# print cia.GetBinaryMap(it1),'\n'
# it2 = im.crop((15,4,24,16))
# print cia.GetBinaryMap(it2)
# print '+++++++++'
# it2 = im.crop((25, 4, 34, 16))
# it3 = im.crop ((36,4,45,16))
# #it3 = im.crop((35, 4, 37, 16))
# it4 = im.crop((38, 4, 47, 16))
# it5 = im.crop((48, 4, 57, 16))
# #it6 = im.crop((51, 3, 57, 11))
# #it7 = im.crop((59, 3, 65, 11))
# multilist = [[0 for col in range(5)] for row in range(3)]
# print '\n'.join(( str(t) for t in multilist))
#profile = CaptchaProfile_360Buy()
#print captcha_360buy(r'c:\6.png')
| [
[
8,
0,
0.0753,
0.0753,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.129,
0.0108,
0,
0.66,
0.0714,
739,
0,
2,
0,
0,
739,
0,
0
],
[
1,
0,
0.1398,
0.0108,
0,
0.66,... | [
"'''\nCreated on 2011-7-26\n\n国美价格图片识别模块\n\n@author: zhongfeng\n'''",
"import ImageFilter, ImageChops",
"from captcha_price import *",
"from gome.gome_feature import GOME_FEATURES_MAP",
"import Image",
"import itertools",
"import re",
"import time",
"try:\n import psyco\n psyco.full()\nexcept ... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from gome.gomepageparser import parserDict,gomeRoot
from spider import main
if __name__ == '__main__':
main(gomeRoot,parserDict) | [
[
8,
0,
0.4286,
0.3571,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.7143,
0.0714,
0,
0.66,
0.3333,
874,
0,
2,
0,
0,
874,
0,
0
],
[
1,
0,
0.7857,
0.0714,
0,
0.66... | [
"'''\nCreated on 2011-8-2\n\n@author: zhongfeng\n'''",
"from gome.gomepageparser import parserDict,gomeRoot",
"from spider import main",
"if __name__ == '__main__':\n main(gomeRoot,parserDict)",
" main(gomeRoot,parserDict)"
] |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-7-26
国美价格图片识别模块
@author: zhongfeng
'''
import ImageFilter, ImageChops
from captcha_price import *
from gome.gome_feature import GOME_FEATURES_MAP
import Image
import itertools
import re
import time
try:
import psyco
psyco.full()
except ImportError:
pass
class CaptchaProfile_gome(CaptchaProfile):
    '''Gome-specific captcha profile: binarises the raw price image and matches
    glyphs against GOME_FEATURES_MAP via the CaptchaProfile base class.'''
    def __init__(self,features_map = GOME_FEATURES_MAP):
        super(CaptchaProfile_gome,self).__init__(features_map)
    def __new__(cls,features_map = GOME_FEATURES_MAP):
        # NOTE(review): forwards features_map to the base __new__ — presumably
        # the base implements a per-map singleton; confirm against captcha_price.
        return super(CaptchaProfile_gome, cls).__new__(cls,features_map)
    def filter(self,im_raw):
        '''Return a 1-bit image: fully transparent pixels (0,0,0,0) -> 255
        (background), everything else -> 0 (ink).
        Assumes im_raw loads as RGBA 4-tuples — TODO confirm source format.'''
        pixdata_raw = im_raw.load()
        imge_size = im_raw.size
        im = Image.new('1',imge_size)
        xsize,ysize = imge_size
        pixdata = im.load()
        for x in xrange(xsize):
            for y in xrange(ysize):
                if pixdata_raw[x,y] == (0,0,0,0):
                    pixdata[x,y] = 255
                else:
                    pixdata[x,y] = 0
        return im
    def split(self, im,top = 6,bottom = 16):
        '''Split glyphs using the fixed vertical band rows 6..16 for this site.'''
        return super(CaptchaProfile_gome,self).split(im,top,bottom)
def captcha_gome(filename):
    '''Recognise the price text in the Gome captcha image *filename* using the
    Gome-specific profile.'''
    return captcha(filename, CaptchaProfile_gome())
import os
# Location of sample captcha images used by the manual test below.
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')
if __name__ == '__main__':
    # Manual smoke test: recognise one sample image, then rebuild the binary
    # map by hand (same transparent-pixel rule as CaptchaProfile_gome.filter)
    # and dump it for visual inspection.
    fileName = os.path.join(testFilePath, "0.png")
    print captcha_gome(fileName)
    im_raw = Image.open(fileName)
    pixdata_raw = im_raw.load()
    #r,g,b,a = im.split()
    # NOTE(review): this filtered image is immediately overwritten by the
    # Image.new below — the EDGE_ENHANCE_MORE path is effectively dead code.
    im = im_raw.filter(ImageFilter.EDGE_ENHANCE_MORE).convert('L').convert('1')
    im = Image.new('1',im_raw.size)
    xsize,ysize = im.size
    pixdata = im.load()
    for x in xrange(xsize):
        for y in xrange(ysize):
            if pixdata_raw[x,y] == (0,0,0,0):
                pixdata[x,y] = 255
            else:
                pixdata[x,y] = 0
    print CaptchaImageAlgorithm.GetBinaryMap(im)
    # Left-over glyph-cropping experiments kept for reference:
#    it1 = im.crop((3, 4, 13, 16))
#    print cia.GetBinaryMap(it1),'\n'
#    it2 = im.crop((15,4,24,16))
#    print cia.GetBinaryMap(it2)
#    print '+++++++++'
#    it2 = im.crop((25, 4, 34, 16))
#    it3 = im.crop ((36,4,45,16))
#    #it3 = im.crop((35, 4, 37, 16))
#    it4 = im.crop((38, 4, 47, 16))
#    it5 = im.crop((48, 4, 57, 16))
#    #it6 = im.crop((51, 3, 57, 11))
#    #it7 = im.crop((59, 3, 65, 11))
#    multilist = [[0 for col in range(5)] for row in range(3)]
#    print '\n'.join(( str(t) for t in multilist))
    #profile = CaptchaProfile_360Buy()
    #print captcha_360buy(r'c:\6.png')
| [
[
8,
0,
0.0753,
0.0753,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.129,
0.0108,
0,
0.66,
0.0714,
739,
0,
2,
0,
0,
739,
0,
0
],
[
1,
0,
0.1398,
0.0108,
0,
0.66,... | [
"'''\nCreated on 2011-7-26\n\n国美价格图片识别模块\n\n@author: zhongfeng\n'''",
"import ImageFilter, ImageChops",
"from captcha_price import *",
"from gome.gome_feature import GOME_FEATURES_MAP",
"import Image",
"import itertools",
"import re",
"import time",
"try:\n import psyco\n psyco.full()\nexcept ... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
# Entry point for the Gome crawler: feeds the Gome parser table and the
# Gome root URL summary into the generic spider driver.
from gome.gomepageparser import parserDict,gomeRoot
from spider import main
if __name__ == '__main__':
    # parserDict maps category level -> parser class; gomeRoot is the crawl seed.
    main(gomeRoot,parserDict)
[
8,
0,
0.4286,
0.3571,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.7143,
0.0714,
0,
0.66,
0.3333,
874,
0,
2,
0,
0,
874,
0,
0
],
[
1,
0,
0.7857,
0.0714,
0,
0.66... | [
"'''\nCreated on 2011-8-2\n\n@author: zhongfeng\n'''",
"from gome.gomepageparser import parserDict,gomeRoot",
"from spider import main",
"if __name__ == '__main__':\n main(gomeRoot,parserDict)",
" main(gomeRoot,parserDict)"
] |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-16
@author: zhongfeng
'''
GOME_FEATURES_MAP = {
    # ASCII-art glyph templates for the Gome price captcha: '#' = ink pixel,
    # '_' = blank. Each multi-line string key is matched byte-for-byte against
    # a rendered glyph, so the strings below must NOT be reformatted.
    # decimal point
    '''
__
__
__
__
__
__
__
__
##
##
'''
    :
    '.',
    # '0', narrow variant
    '''
______
_####_
##__##
##__##
##__##
##__##
##__##
##__##
##__##
_####_ '''
    :
    '0',
    # '0', wide variant
    '''
__###__
_##_##_
##___##
##___##
##___##
##___##
##___##
##___##
_##_##_
__###__ '''
    :
    '0',
    '''
__##__
####__
__##__
__##__
__##__
__##__
__##__
__##__
__##__
######
'''
    :
    '1',
    '''
_####_
#___##
____##
____##
____##
___##_
__##__
_##___
##____
######
'''
    :
    '2',
    '''
_#####_
#____##
_____##
____##_
_####__
____##_
_____##
_____##
#___##_
_####__
'''
    :
    '3',
    '''
____##_
___###_
___###_
__#_##_
_#__##_
#___##_
#######
____##_
____##_
____##_
'''
    :
    '4',
    '''
#######
#______
#______
#####__
____##_
_____##
_____##
_____##
#___##_
_####__
'''
    :
    '5',
    '''
__####_
_##___#
_#_____
##_____
##_###_
###__##
##___##
##___##
_##__##
__####_
'''
    :
    '6',
    '''
#######
_____##
____##_
____##_
___##__
___##__
__##___
__##___
_##____
_##____
'''
    :
    '7',
    '''
_#####_
##___##
##___##
###_##_
_####__
_#####_
##__###
##___##
##___##
_#####_
'''
    :
    '8',
    '''
_####__
##__##_
##___##
##___##
##__###
_###_##
_____##
_____##
#___##_
_####__
'''
    :
    '9',
    }
[
8,
0,
0.0317,
0.0265,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.5291,
0.9471,
0,
0.66,
1,
764,
0,
0,
0,
0,
0,
6,
0
]
] | [
"'''\nCreated on 2011-8-16\n\n@author: zhongfeng\n'''",
"GOME_FEATURES_MAP = {\n '''\n__\n__\n__\n__\n__\n__"
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-11-25
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from pageparser import *
from spiderconfigparser import SpiderConfig
lusenRoot = ObuyUrlSummary(url=r'http://www.lusen.com/', name='lusen')
class LusenAllSortParser(RootCatagoryPageParser):
    '''Front-page parser for lusen.com: every <a class="depth-1"> link is
    treated directly as a level-3 (listing) category.'''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(LusenAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def getBaseSort3UrlSums(self):
        '''Return the list of level-3 category URL summaries found on the page.'''
        finalUrlList = []
        for t in self.soup.findAll(name='a',attrs={'class':'depth-1'}):# top-level category links
            name,url = ParserUtils.parserTag_A(t)
            # lusen has no intermediate levels, so each link is marked as a
            # final (level-3) listing page right away.
            sort_3_urlsum = self.buildSort_N(url, name, self.rootUrlSummary,firstFinalPage=True)
            sort_3_urlsum.catagoryLevel = 3
            finalUrlList.append(sort_3_urlsum)
        return finalUrlList
class LusenSort3PageParser(Sort3PageParser):
    '''
    Parser for lusen.com level-3 listing pages: pagination plus per-product
    extraction (name, id, current price, image).
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(LusenSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def nextPageUrlPattern(self):
        '''Build the paging URL template: "<url-without-ext>--0--{}--index.html",
        where {} is later filled with the page number.'''
        pageSeg = '--0--{}--index.html'
        urlSeg = self.rootUrlSummary.url.rsplit('.',1)[0]
        return '%s%s' % (urlSeg,pageSeg)
    def getTotal(self):
        '''Total page count from <span class="pageall">, defaulting to 1 and
        capped at the configured maximum.'''
        s = self.soup.find(name='span',attrs = {'class':'pageall'})
        if s is None:
            pageNum = 1
        else:
            pageNum = s.getText()
        totalPage = int(pageNum)
        if totalPage > SpiderConfig.getMaxPage():
            totalPage = SpiderConfig.getMaxPage()
        return totalPage
    def __getSingleProdDetail(self, prod):
        '''Extract one ProductDetails from a product <div> (id="pdt-NNN").'''
        pNameSeg = prod.find(attrs={'class':'goodinfo'})
        pName, url = ParserUtils.parserTag_A(pNameSeg.a)
        pid = prod['product']
        t = prod.find(name='td', attrs={'class':'price_button'})
        if t != None:
            currentPrice = ParserUtils.getPrice(t.getText())
        else:
            currentPrice = 0.00
        # The site exposes no list price on listing pages.
        pastPrice = 0.00
        imgUrlSeg = prod.find(name='td', attrs={'class':'goodpic'})
        imgUrl = ParserUtils.getImgUrl(imgUrlSeg)
        prodDetail = ProductDetails(productId=pid, fullUrl=url, imageUrl=imgUrl, privPrice=currentPrice, pubPrice=pastPrice,
                                    name=pName, adWords='')
        prodDetail.catagory = self.rootUrlSummary
        return prodDetail
    def parserPageInfos(self):
        '''Return ProductDetails for every div whose id matches "pdt-<digits>".'''
        resultList = []
        listSeg = self.soup.findAll(name='div',attrs={'id':re.compile(r'pdt-[0-9]*')})
        for prod in listSeg:
            prodDetail = self.__getSingleProdDetail(prod)
            resultList.append(prodDetail)
        return resultList
class LusenSort4PageParser(LusenSort3PageParser):
    '''
    Level-4 pages are plain listing pages: product info only, no further
    sub-category URLs to discover.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(LusenSort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def parserSubUrlSums(self):
        # Deliberate no-op: level-4 pages yield no child URLs.
        pass
# Category level -> parser class, consumed by the generic spider driver.
parserDict = {0:LusenAllSortParser, 3:LusenSort3PageParser, 4:LusenSort4PageParser}
''' test '''
# Manual test scaffolding below — performs live HTTP requests.
import os,chardet
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')
from crawlerhttp import getContentFromUrlSum
def testAllSortPage():
    '''Fetch the front page and dump every discovered level-3 category.'''
    rootUrlSum = ObuyUrlSummary(url=r'http://www.lusen.com', name='lusen')
    content = getContentFromUrlSum(rootUrlSum)
    firstPage = LusenAllSortParser(content, rootUrlSum)
    for sort_3 in firstPage.parserSubUrlSums():
        for index, urlsum in enumerate(sort_3.parentPath):
            print urlsum.name
        print sort_3.name,sort_3.url ,sort_3.catagoryLevel
def testSort3Page():
    '''Fetch one listing page; dump its pagination URLs and parsed products.'''
    sort_3_urlsum = ObuyUrlSummary(url=r'http://www.lusen.com/gallery-175.html',
                                   parentPath=[('test')], catagoryLevel=3)
    content = getContentFromUrlSum(sort_3_urlsum)
    sort3Page = LusenSort3PageParser(content, sort_3_urlsum)
    for sort_4 in sort3Page.getSort4PageUrlSums():
        print sort_4.url
    for product in sort3Page.parserPageInfos():
        print product.name
if __name__ == '__main__':
    #testAllSortPage()
    testSort3Page()
| [
[
8,
0,
0.056,
0.056,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.096,
0.008,
0,
0.66,
0.0667,
488,
0,
1,
0,
0,
488,
0,
0
],
[
1,
0,
0.104,
0.008,
0,
0.66,
... | [
"'''\nCreated on 2011-11-25\n\n主要用于从网站上爬取信息后,抽取页面信息;\n\n@author: zhongfeng\n'''",
"from pageparser import *",
"from spiderconfigparser import SpiderConfig",
"lusenRoot = ObuyUrlSummary(url=r'http://www.lusen.com/', name='lusen')",
"class LusenAllSortParser(RootCatagoryPageParser): \n def __init__(self, d... |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-11-25
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from pageparser import *
from spiderconfigparser import SpiderConfig
lusenRoot = ObuyUrlSummary(url=r'http://www.lusen.com/', name='lusen')
class LusenAllSortParser(RootCatagoryPageParser):
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(LusenAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def getBaseSort3UrlSums(self):
finalUrlList = []
for t in self.soup.findAll(name='a',attrs={'class':'depth-1'}):#一级分类
name,url = ParserUtils.parserTag_A(t)
sort_3_urlsum = self.buildSort_N(url, name, self.rootUrlSummary,firstFinalPage=True)
sort_3_urlsum.catagoryLevel = 3
finalUrlList.append(sort_3_urlsum)
return finalUrlList
class LusenSort3PageParser(Sort3PageParser):
'''
三级页面解析类
'''
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(LusenSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def nextPageUrlPattern(self):
pageSeg = '--0--{}--index.html'
urlSeg = self.rootUrlSummary.url.rsplit('.',1)[0]
return '%s%s' % (urlSeg,pageSeg)
def getTotal(self):
s = self.soup.find(name='span',attrs = {'class':'pageall'})
if s is None:
pageNum = 1
else:
pageNum = s.getText()
totalPage = int(pageNum)
if totalPage > SpiderConfig.getMaxPage():
totalPage = SpiderConfig.getMaxPage()
return totalPage
def __getSingleProdDetail(self, prod):
pNameSeg = prod.find(attrs={'class':'goodinfo'})
pName, url = ParserUtils.parserTag_A(pNameSeg.a)
pid = prod['product']
t = prod.find(name='td', attrs={'class':'price_button'})
if t != None:
currentPrice = ParserUtils.getPrice(t.getText())
else:
currentPrice = 0.00
pastPrice = 0.00
imgUrlSeg = prod.find(name='td', attrs={'class':'goodpic'})
imgUrl = ParserUtils.getImgUrl(imgUrlSeg)
prodDetail = ProductDetails(productId=pid, fullUrl=url, imageUrl=imgUrl, privPrice=currentPrice, pubPrice=pastPrice,
name=pName, adWords='')
prodDetail.catagory = self.rootUrlSummary
return prodDetail
def parserPageInfos(self):
resultList = []
listSeg = self.soup.findAll(name='div',attrs={'id':re.compile(r'pdt-[0-9]*')})
for prod in listSeg:
prodDetail = self.__getSingleProdDetail(prod)
resultList.append(prodDetail)
return resultList
class LusenSort4PageParser(LusenSort3PageParser):
'''
分类四级页面为列表页面,只抽取Product信息
'''
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(LusenSort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def parserSubUrlSums(self):
pass
parserDict = {0:LusenAllSortParser, 3:LusenSort3PageParser, 4:LusenSort4PageParser}
''' test '''
import os,chardet
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')
from crawlerhttp import getContentFromUrlSum
def testAllSortPage():
rootUrlSum = ObuyUrlSummary(url=r'http://www.lusen.com', name='lusen')
content = getContentFromUrlSum(rootUrlSum)
firstPage = LusenAllSortParser(content, rootUrlSum)
for sort_3 in firstPage.parserSubUrlSums():
for index, urlsum in enumerate(sort_3.parentPath):
print urlsum.name
print sort_3.name,sort_3.url ,sort_3.catagoryLevel
def testSort3Page():
sort_3_urlsum = ObuyUrlSummary(url=r'http://www.lusen.com/gallery-175.html',
parentPath=[('test')], catagoryLevel=3)
content = getContentFromUrlSum(sort_3_urlsum)
sort3Page = LusenSort3PageParser(content, sort_3_urlsum)
for sort_4 in sort3Page.getSort4PageUrlSums():
print sort_4.url
for product in sort3Page.parserPageInfos():
print product.name
if __name__ == '__main__':
#testAllSortPage()
testSort3Page()
| [
[
8,
0,
0.056,
0.056,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.096,
0.008,
0,
0.66,
0.0667,
488,
0,
1,
0,
0,
488,
0,
0
],
[
1,
0,
0.104,
0.008,
0,
0.66,
... | [
"'''\nCreated on 2011-11-25\n\n主要用于从网站上爬取信息后,抽取页面信息;\n\n@author: zhongfeng\n'''",
"from pageparser import *",
"from spiderconfigparser import SpiderConfig",
"lusenRoot = ObuyUrlSummary(url=r'http://www.lusen.com/', name='lusen')",
"class LusenAllSortParser(RootCatagoryPageParser): \n def __init__(self, d... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
# Entry point for the Lusen crawler: feeds the Lusen parser table and the
# Lusen root URL summary into the generic spider driver.
from lusen.lusenpageparser import parserDict,lusenRoot
from spider import main
if __name__ == '__main__':
    main(lusenRoot,parserDict)
[
8,
0,
0.4333,
0.4,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.7333,
0.0667,
0,
0.66,
0.3333,
925,
0,
2,
0,
0,
925,
0,
0
],
[
1,
0,
0.8,
0.0667,
0,
0.66,
... | [
"'''\nCreated on 2011-8-2\n\n@author: zhongfeng\n\n'''",
"from lusen.lusenpageparser import parserDict,lusenRoot",
"from spider import main",
"if __name__ == '__main__':\n main(lusenRoot,parserDict)",
" main(lusenRoot,parserDict)"
] |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
# Entry point for the Lusen crawler: feeds the Lusen parser table and the
# Lusen root URL summary into the generic spider driver.
from lusen.lusenpageparser import parserDict,lusenRoot
from spider import main
if __name__ == '__main__':
    main(lusenRoot,parserDict)
[
8,
0,
0.4333,
0.4,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.7333,
0.0667,
0,
0.66,
0.3333,
925,
0,
2,
0,
0,
925,
0,
0
],
[
1,
0,
0.8,
0.0667,
0,
0.66,
... | [
"'''\nCreated on 2011-8-2\n\n@author: zhongfeng\n\n'''",
"from lusen.lusenpageparser import parserDict,lusenRoot",
"from spider import main",
"if __name__ == '__main__':\n main(lusenRoot,parserDict)",
" main(lusenRoot,parserDict)"
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import MySQLdb
import os
import hashlib
#DB parameter
def update360buyRepu():
    '''One-off maintenance job: rescale `repu` for 360buy rows (site_id=6) in
    `prod_base_info_3c`, paging through the table 2000 rows at a time.'''
    conn = getConnect()
    qPage = MySQLQueryPagination(conn=conn, numPerPage=2000)
    sqlStr = r'SELECT id,repu FROM `prod_base_info_3c` where site_id=6'
    for result in qPage.queryForList(sql=sqlStr):
        prodList = []
        for prod in result:
            id = prod[0]
            repu = prod[1]
            # Scale by 5/100 — presumably 0-100 -> 0-5 stars. Python 2 int
            # division floors when repu is an int; confirm fractional stars
            # are not wanted.
            repu = repu * 5 / 100
            print repu, id
            prodList.append((repu, id))
        print '+++'
        batchUpdateProdBaseInfo(conn, prodList)
    conn.close()
# Default MySQL connection settings.
# NOTE(review): root with an empty password — acceptable only on a local dev box.
strHost = 'localhost'
strDB = 'bigo_db_new'
strUser = 'root'
strPasswd = ''
def seEncode(ustr, encoding='utf-8'):
    '''Encode a value for DB storage (Python 2): None -> '', unicode -> bytes
    in *encoding* with errors ignored, list/tuple/set -> '[a,b,...]' with the
    elements encoded recursively, anything else -> str().'''
    if ustr is None:
        return ''
    if isinstance(ustr, unicode):
        return ustr.encode(encoding, 'ignore')
    elif isinstance(ustr, (list,tuple,set)):
        return '[%s]' % ','.join([seEncode(s,encoding) for s in ustr])
    return str(ustr)
#connect to DB
def getConnect(db=strDB, host=strHost, user=strUser, passwd=strPasswd, charset="utf8"):
    '''Open a new MySQL connection.

    BUG FIX: the original body ignored every parameter and always connected
    with the module-level defaults, so callers could never target a different
    database/host. The defaults are unchanged, so all existing calls behave
    exactly as before.
    '''
    return MySQLdb.connect(host=host, db=db, user=user, passwd=passwd, charset=charset)
def initClientEncode(conn):
    '''Create a cursor on *conn* with the client connection charset forced to
    utf8 (SET NAMES). Caller owns and must close the returned cursor.'''
    curs = conn.cursor()
    curs.execute("SET NAMES utf8")
    conn.commit()
    return curs
class MySQLQueryPagination(object):
    '''Pagination helper: wraps an arbitrary SELECT in
    "select * from (...) total_table limit start,count" and yields one page of
    rows at a time.'''
    def __init__(self,conn,numPerPage = 20):
        # conn: open DB-API connection; numPerPage: rows per page.
        self.conn = conn
        self.numPerPage = numPerPage
    def queryForList(self,sql,param = None):
        '''Generator yielding one fetchall() tuple per page of *sql*.'''
        totalPageNum = self.__calTotalPages(sql,param)
        for pageIndex in xrange(totalPageNum):
            yield self.__queryEachPage(sql,pageIndex,param)
    def __createPaginaionQuerySql(self,sql,currentPageIndex):
        # Wrap the caller's SQL as a derived table and append LIMIT offset,count.
        startIndex = self.__calStartIndex(currentPageIndex)
        qSql = r'select * from (%s) total_table limit %s,%s' % (sql,startIndex,self.numPerPage)
        return qSql
    def __queryEachPage(self,sql,currentPageIndex,param = None):
        # Execute one page's query and return all of its rows.
        curs = initClientEncode(self.conn)
        qSql = self.__createPaginaionQuerySql(sql, currentPageIndex)
        if param is None:
            curs.execute(qSql)
        else:
            curs.execute(qSql,param)
        result = curs.fetchall()
        curs.close()
        return result
    def __calStartIndex(self,currentPageIndex):
        # 0-based page index -> LIMIT offset.
        startIndex = currentPageIndex * self.numPerPage;
        return startIndex;
    def __calTotalRowsNum(self,sql,param = None):
        ''' Count how many rows the wrapped query would return in total. '''
        tSql = r'select count(*) from (%s) total_table' % sql
        curs = initClientEncode(self.conn)
        if param is None:
            curs.execute(tSql)
        else:
            curs.execute(tSql,param)
        result = curs.fetchone()
        curs.close()
        totalRowsNum = 0
        if result != None:
            totalRowsNum = int(result[0])
        return totalRowsNum
    def __calTotalPages(self,sql,param):
        ''' Total page count = ceil(rows / numPerPage), via Python 2 int division. '''
        totalRowsNum = self.__calTotalRowsNum(sql,param)
        totalPages = 0;
        if (totalRowsNum % self.numPerPage) == 0:
            totalPages = totalRowsNum / self.numPerPage;
        else:
            totalPages = (totalRowsNum / self.numPerPage) + 1
        return totalPages
    def __calLastIndex(self, totalRows, totalPages,currentPageIndex):
        '''End index of the current page. Apparently unused within this module.'''
        lastIndex = 0;
        if totalRows < self.numPerPage:
            lastIndex = totalRows;
        elif ((totalRows % self.numPerPage == 0)
            or (totalRows % self.numPerPage != 0 and currentPageIndex < totalPages)) :
            lastIndex = currentPageIndex * self.numPerPage
        elif (totalRows % self.numPerPage != 0 and currentPageIndex == totalPages): # last page
            lastIndex = totalRows
        return lastIndex
#===============================================================================
# 表 `websit_base_info` db 操作
#===============================================================================
def getAllWebsiteBaseInfo():
    '''Return (en_name, id) rows for every site in `websit_base_info`.'''
    conn = getConnect()
    curs = initClientEncode(conn)
    sqlStr = 'SELECT en_name,id FROM `websit_base_info` ' # build the query
    curs.execute(sqlStr)
    result = curs.fetchall()
    curs.close()
    conn.close()
    return result
#===============================================================================
# table `cat_base_config` operations
#===============================================================================
def getAllCatBaseConfig():
    '''Return per-main-category table-name configuration rows from
    `cat_base_config`. (Original docstring was a copy-paste of the site-info
    helper's.)'''
    conn = getConnect()
    curs = initClientEncode(conn)
    sqlStr = '''SELECT main_cat_id, baseinfo_table_name, priceinfo_cur_table_name, priceinfo_his_table_name,
                en_name FROM `cat_base_config`''' # build the query
    curs.execute(sqlStr)
    result = curs.fetchall()
    curs.close()
    conn.close()
    return result
#===============================================================================
# 表 `prod_catagory` db proc
#===============================================================================
def saveProdCat(rawCatId, siteId, parentId, url, name, parentPath,
level, self_cat_id = 0,cat_base_id = 0 ):
''' 保存各站点的分类信息 '''
conn = getConnect()
curs = initClientEncode(conn)
if parentId == '':
print url
sqlStr = '''INSERT INTO `prod_catagory` (`raw_cat_id` ,`site_id` ,`parent_id` ,`url` ,`name` ,`parent_path` ,`level` , self_cat_id, cat_base_id,`update_time` )
VALUES ( %s, %s, %s, %s, %s, %s, %s, now()) '''
param = [seEncode(pt) for pt in (rawCatId, siteId, parentId, url, name, parentPath, level,self_cat_id, cat_base_id,)]
curs.execute(sqlStr, param)
curs.close()
conn.close()
return int(curs.lastrowid)
def match55bigoCats(site_id,name):
    '''Fuzzy-match level-3 categories by name, for site 9 (55bigo) or *site_id*.

    FIX: the LIKE pattern used to be spliced in with str.format, which breaks
    on names containing quotes or '%' and is SQL-injectable; the pattern is
    now bound as an ordinary query parameter.
    '''
    conn = getConnect()
    curs = initClientEncode(conn)
    sqlStr = '''SELECT id,site_id,self_cat_id,name,url,cat_base_id FROM `prod_catagory` where (site_id =9 or site_id=%s) AND LEVEL =3 AND name LIKE %s '''
    param = [site_id, '%' + name + '%']
    curs.execute(sqlStr,param)
    result = curs.fetchall()
    curs.close()
    conn.close()
    return result
def getCatIdFromRawCatID(raw_cat_id, site_id):
    '''Return (id, self_cat_id, cat_base_id) for one category row, or None
    when (raw_cat_id, site_id) is unknown.'''
    conn = getConnect()
    curs = initClientEncode(conn)
    sqlStr = 'SELECT id,self_cat_id,cat_base_id FROM `prod_catagory` where raw_cat_id = %s and site_id = %s'
    param = (raw_cat_id, site_id)
    curs.execute(sqlStr, param)
    result = curs.fetchone()
    curs.close()
    conn.close()
    if result != None:
        return result
def getCatBySiteIdAndLevel(site_id,level):
    '''Return all (id, name, parent_path) category rows for a site at one level.'''
    conn = getConnect()
    curs = initClientEncode(conn)
    sqlStr = 'SELECT id ,name,parent_path FROM `prod_catagory` where site_id = %s and level = %s'
    param = ( site_id,level)
    curs.execute(sqlStr, param)
    result = curs.fetchall()
    curs.close()
    conn.close()
    return result
#===============================================================================
# 表 `prod_base_info` db proc
#===============================================================================
def saveProdBaseInfo(conn,site_id , raw_id , name , url , img_url , repu, eval_num, cat_id,u_time):
    '''Insert one product row into `prod_base_info`; returns the new row id.
    *conn* stays open — the caller owns the connection.'''
    curs = initClientEncode(conn)
    sqlStr = '''INSERT INTO `prod_base_info` (`site_id` ,`raw_id` ,`name` ,`url` ,`img_url` , `repu`, `eval_num`, `cat_id` ,`u_time` )
                VALUES (%s , %s, %s, %s, %s, %s , %s, %s , %s)'''
    param = [seEncode(pt) for pt in (site_id , raw_id , name , url , img_url , repu, eval_num, cat_id,u_time)]
    curs.execute(sqlStr, param)
    conn.commit()
    ret = curs.lastrowid
    curs.close()
    return int(ret)
def batchSaveProdBaseInfo(conn, params):
    '''Bulk insert into `prod_base_info`; *params* is an iterable of 9-tuples
    in the same column order as saveProdBaseInfo.'''
    curs = initClientEncode(conn)
    sqlStr = '''INSERT INTO `prod_base_info` (`site_id` ,`raw_id` ,`name` ,`url` ,`img_url` , `repu`, `eval_num`, `cat_id` ,`u_time` )
                VALUES (%s , %s, %s, %s, %s, %s , %s, %s , %s)'''
    wparams = list()
    for param in params:
        wparams.append([seEncode(pt) for pt in param])
    curs.executemany(sqlStr, wparams)
    conn.commit()
    curs.close()
def getProdId(conn, site_id, raw_id):
    '''Return the internal product id for (site_id, raw_id), or None.'''
    curs = initClientEncode(conn)
    sqlStr = 'SELECT id FROM `prod_base_info` where site_id = %s and raw_id = %s'
    param = (site_id, raw_id)
    curs.execute(sqlStr, param)
    result = curs.fetchone()
    curs.close()
    if result != None:
        return result[0]
def getAllRawProdIdsBySite(site_id):
    '''Return every (raw_id, id) pair for one site from `prod_base_info`.

    FIX: the bind parameter was `(site_id)` — a parenthesised scalar, not a
    one-element tuple; pass a real tuple so execute() receives a parameter
    sequence, consistent with the other query helpers in this module.
    '''
    conn = getConnect()
    curs = initClientEncode(conn)
    sqlStr = 'SELECT raw_id,id FROM `prod_base_info` where site_id = %s'
    param = (site_id,)
    curs.execute(sqlStr, param)
    result = curs.fetchall()
    curs.close()
    conn.close()
    return result
#===============================================================================
# 表 `prod_price_info` db proc
#===============================================================================
def saveProdPriceInfo(prod_id, real_price, cur_price, diff_price, adwords, coupon=0.00, ex_gift='',
                      order_cut=0.00, crash_cut=0.00, m_price=0.00, trans_price=0.00, other_dis=0.00, u_time = None):
    '''Insert one price snapshot into `prod_price_info`; returns the new row id.
    Note the SQL column order differs from the Python parameter order — the
    bind list below performs the reordering.'''
    conn = getConnect()
    curs = initClientEncode(conn)
    sqlStr = '''INSERT INTO `prod_price_info` (`prod_id` ,`real_price` ,`cur_price` ,`m_price` ,`diff_price` ,`trans_price`,`other_dis` ,
                `adwords` ,`coupon` ,`ex_gift` ,`order_cut` ,`crash_cut`,`u_time` ) VALUES (%s, %s, %s, %s, %s, %s,%s, %s, %s,%s, %s, %s,%s)'''
    #if isinstance(u_time, basestring):
    #    u_time = strptime(u_time,"%Y-%m-%d %H:%M:%S")
    param = [seEncode(pt) for pt in (prod_id, real_price, cur_price, m_price, diff_price, trans_price, other_dis, adwords, coupon, ex_gift, order_cut, crash_cut,u_time)]
    curs.execute(sqlStr, param)
    conn.commit()
    ret = curs.lastrowid
    curs.close()
    conn.close()
    return int(ret)
def batchSaveProdPriceInfo(conn, params):
    '''Bulk insert price snapshots; each row in *params* must already be in SQL
    column order (prod_id, real_price, cur_price, m_price, diff_price,
    trans_price, other_dis, adwords, coupon, ex_gift, order_cut, crash_cut,
    u_time).'''
    curs = initClientEncode(conn)
    sqlStr = '''INSERT INTO `prod_price_info` (`prod_id` ,`real_price` ,`cur_price` ,`m_price` ,`diff_price` ,`trans_price`,`other_dis` ,
                `adwords` ,`coupon` ,`ex_gift` ,`order_cut` ,`crash_cut`,`u_time` ) VALUES (%s, %s, %s, %s, %s, %s,%s, %s, %s,%s, %s, %s,%s)'''
    wparams = list()
    for param in params:
        wparams.append([seEncode(pt) for pt in param])
    curs.executemany(sqlStr, wparams)
    conn.commit()
    curs.close()
def getProdPriceInfoFromProdId(conn, prod_id):
    '''Return price-history rows (real_price, cur_price, u_time, id) for a
    product, newest first, or None when it has no history.

    FIX: the bind parameter was `(prod_id)` — a parenthesised scalar, not a
    tuple; pass a real one-element tuple, consistent with the other helpers.
    '''
    curs = initClientEncode(conn)
    sqlStr = '''select real_price,cur_price,u_time,id from `prod_price_info` where prod_id=%s order by u_time DESC'''
    param = (prod_id,)
    curs.execute(sqlStr, param)
    result = curs.fetchall()
    curs.close()
    if len(result) > 0:
        return result
def getMd5Key(src):
    '''Return the MD5 digest of *src* interpreted as a base-16 integer.'''
    digest_hex = hashlib.md5(src).hexdigest()
    return int(digest_hex, 16)
def getCatKey(url):
    '''First 16 decimal digits of the URL's MD5-derived integer key; used as
    the stable raw category id.'''
    return str(getMd5Key(url))[:16]
class SiteNameIDDict(object):
    '''Singleton cache of site en_name -> id, loaded once from `websit_base_info`.'''
    def __init__(self):
        pass
    def __new__(cls):
        '''
        Singleton: the site table is queried only on first instantiation.
        '''
        if '_inst' not in vars(cls):
            cls.t_site_dict = dict(getAllWebsiteBaseInfo())
            cls._inst = super(SiteNameIDDict, cls).__new__(cls)
        return cls._inst
    def getSiteIdByName(self, siteName):
        # Raises KeyError for an unknown site name.
        return self.t_site_dict[siteName]
def getSiteIdByName(siteName):
    '''Module-level convenience wrapper over the SiteNameIDDict singleton.'''
    siteNameIDDict = SiteNameIDDict()
    return siteNameIDDict.getSiteIdByName(siteName)
class ProdCatIdDict(object):
    '''Singleton memo cache: (site_id, raw_cat_id) -> category row lookup.'''
    def __init__(self):
        pass
    def __new__(cls):
        '''
        Singleton: one shared cache dict per process.
        '''
        if '_inst' not in vars(cls):
            cls.t_cat_dict = dict()
            cls._inst = super(ProdCatIdDict, cls).__new__(cls)
        return cls._inst
    def __getKey(self, siteId, rawCatId):
        return '_'.join(map(str, (siteId, rawCatId)))
    def getProdCatId(self, siteId, catUrl):
        '''Return the cached category row for (siteId, url-derived key),
        querying the DB on a cache miss.'''
        rawCatId = getCatKey(catUrl)
        key = self.__getKey(siteId, rawCatId)
        value = self.t_cat_dict.get(key, None)
        if value is None:
            # NOTE: a None result is stored but indistinguishable from a
            # miss, so genuinely-missing rows are re-queried on every call.
            value = getCatIdFromRawCatID(rawCatId, siteId)
            self.t_cat_dict[key] = value
        return value
def getCatIdFromRawInfo(siteId, catUrl):
    '''Return just the category id for (siteId, catUrl), or None.'''
    catIdDict = ProdCatIdDict()
    ret = catIdDict.getProdCatId(siteId, catUrl)
    if ret :
        return ret[0]
def getProdInfoRawIDMapId(siteId):
    '''Return dict raw_id -> internal id for every product of a site.'''
    return dict(getAllRawProdIdsBySite(siteId))#key:raw_id value:id
# Location of log fixtures consumed by the one-off importer below.
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')
def test(siteName, encoding='gb2312'):
    '''One-off importer: parse a 360buy spider log ('|'-separated fields) and
    bulk-insert new product rows in batches of 100, skipping raw_ids that are
    already in the DB. Also writes lines whose field 7 mentions "下单" to a
    side log.'''
    site_id = getSiteIdByName(siteName)
    fileName = os.path.join(testFilePath, '360buy_spider.log')
    conn = getConnect()
    params = list()
    i = 0
    # Pre-load known raw ids so duplicates are skipped without a DB hit each.
    prodIds = set([ t[0] for t in getAllRawProdIdsBySite(site_id)])
    print len(prodIds)
    import re
    p = re.compile('[0-9]+')
    p1 = re.compile(u'下单')
    # NOTE(review): 'c:t360buy_jian.log' looks like a missing backslash
    # (c:\t360buy_jian.log?) — confirm the intended output path.
    fOutput = open('c:t360buy_jian.log', 'w')
    with open(fileName, 'r') as fInput:
        for line in fInput:
            line = line.strip().decode(encoding, 'ignore')
            ret = line.split('|')
            raw_id = ret[0]
            if p.search(ret[6]) != None and p1.search(ret[6]) != None:
                fOutput.write(ret[0] + ' ' + ret[6] + '\n')
            #if getProdId(site_id,raw_id) != None:
            if raw_id in prodIds:
                #print '++++++++++++++++++++++++++'
                continue
            prodIds.add(raw_id)
            name = ret[3]
            repu = ret[4]
            eval_num = ret[5]
            url = ret[7]
            img_url = ret[8]
            # SECURITY NOTE: eval() on a log field executes arbitrary code if
            # the log is ever attacker-influenced; ast.literal_eval would be safer.
            catUrl = eval(ret[-1])[0]
            cat_id = getCatIdFromRawInfo(site_id, catUrl)
            u_time = ret[2]
            #print raw_id , name , url , repu, eval_num, img_url ,cat_id
            param = (site_id , raw_id , name , url , img_url , repu, eval_num, cat_id,u_time)
            if cat_id == '':
                print param
            params.append(param)
            i = i + 1
            if i == 100:
                # Flush one batch of 100 rows.
                batchSaveProdBaseInfo(conn, params)
                params = list()
                i = 0
    if i > 0:
        # Flush the final partial batch.
        batchSaveProdBaseInfo(conn, params)
    del params
    conn.close()
    fOutput.close()
def batchUpdateProdBaseInfo(conn, prodList):
    ''' Batch-update repu in `prod_base_info_3c`; *prodList* holds (repu, id) pairs. '''
    curs = initClientEncode(conn)
    sqlPattern = '''update prod_base_info_3c set repu=%s where id = %s'''
    #sqlStr = sqlPattern.format(table_name=table_name)
    wparams = list()
    for pBaseInfo in prodList:
        wparams.append([seEncode(pt) for pt in pBaseInfo])
    curs.executemany(sqlPattern, wparams)
    conn.commit()
    curs.close()
if __name__ == '__main__':
for t in match55bigoCats('笔记本'):
print seEncode(t)
| [
[
1,
0,
0.0093,
0.0023,
0,
0.66,
0,
838,
0,
1,
0,
0,
838,
0,
0
],
[
1,
0,
0.0116,
0.0023,
0,
0.66,
0.0278,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.0162,
0.0023,
0,
... | [
"import MySQLdb",
"import os",
"import hashlib",
"def update360buyRepu():\n conn = getConnect()\n qPage = MySQLQueryPagination(conn=conn, numPerPage=2000)\n sqlStr = r'SELECT id,repu FROM `prod_base_info_3c` where site_id=6'\n for result in qPage.queryForList(sql=sqlStr):\n prodList = []\n ... |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import MySQLdb
import os
import hashlib
#DB parameter
def update360buyRepu():
conn = getConnect()
qPage = MySQLQueryPagination(conn=conn, numPerPage=2000)
sqlStr = r'SELECT id,repu FROM `prod_base_info_3c` where site_id=6'
for result in qPage.queryForList(sql=sqlStr):
prodList = []
for prod in result:
id = prod[0]
repu = prod[1]
repu = repu * 5 / 100
print repu, id
prodList.append((repu, id))
print '+++'
batchUpdateProdBaseInfo(conn, prodList)
conn.close()
strHost = 'localhost'
strDB = 'bigo_db_new'
strUser = 'root'
strPasswd = ''
def seEncode(ustr, encoding='utf-8'):
'''负责把入数据库的字符串,转化成utf-8编码'''
if ustr is None:
return ''
if isinstance(ustr, unicode):
return ustr.encode(encoding, 'ignore')
elif isinstance(ustr, (list,tuple,set)):
return '[%s]' % ','.join([seEncode(s,encoding) for s in ustr])
return str(ustr)
#connect to DB
def getConnect(db=strDB, host=strHost, user=strUser, passwd=strPasswd, charset="utf8"):
    '''Open a new MySQL connection.

    BUG FIX: the original body ignored every parameter and always connected
    with the module-level defaults, so callers could never target a different
    database/host. The defaults are unchanged, so all existing calls behave
    exactly as before.
    '''
    return MySQLdb.connect(host=host, db=db, user=user, passwd=passwd, charset=charset)
def initClientEncode(conn):
'''mysql client encoding=utf8'''
curs = conn.cursor()
curs.execute("SET NAMES utf8")
conn.commit()
return curs
class MySQLQueryPagination(object):
'''MySQL 分页类的实现'''
def __init__(self,conn,numPerPage = 20):
self.conn = conn
self.numPerPage = numPerPage
def queryForList(self,sql,param = None):
totalPageNum = self.__calTotalPages(sql,param)
for pageIndex in xrange(totalPageNum):
yield self.__queryEachPage(sql,pageIndex,param)
def __createPaginaionQuerySql(self,sql,currentPageIndex):
startIndex = self.__calStartIndex(currentPageIndex)
qSql = r'select * from (%s) total_table limit %s,%s' % (sql,startIndex,self.numPerPage)
return qSql
def __queryEachPage(self,sql,currentPageIndex,param = None):
curs = initClientEncode(self.conn)
qSql = self.__createPaginaionQuerySql(sql, currentPageIndex)
if param is None:
curs.execute(qSql)
else:
curs.execute(qSql,param)
result = curs.fetchall()
curs.close()
return result
def __calStartIndex(self,currentPageIndex):
startIndex = currentPageIndex * self.numPerPage;
return startIndex;
def __calTotalRowsNum(self,sql,param = None):
''' 计算总行数 '''
tSql = r'select count(*) from (%s) total_table' % sql
curs = initClientEncode(self.conn)
if param is None:
curs.execute(tSql)
else:
curs.execute(tSql,param)
result = curs.fetchone()
curs.close()
totalRowsNum = 0
if result != None:
totalRowsNum = int(result[0])
return totalRowsNum
def __calTotalPages(self,sql,param):
''' 计算总页数 '''
totalRowsNum = self.__calTotalRowsNum(sql,param)
totalPages = 0;
if (totalRowsNum % self.numPerPage) == 0:
totalPages = totalRowsNum / self.numPerPage;
else:
totalPages = (totalRowsNum / self.numPerPage) + 1
return totalPages
def __calLastIndex(self, totalRows, totalPages,currentPageIndex):
'''计算结束时候的索引'''
lastIndex = 0;
if totalRows < self.numPerPage:
lastIndex = totalRows;
elif ((totalRows % self.numPerPage == 0)
or (totalRows % self.numPerPage != 0 and currentPageIndex < totalPages)) :
lastIndex = currentPageIndex * self.numPerPage
elif (totalRows % self.numPerPage != 0 and currentPageIndex == totalPages): # 最后一页
lastIndex = totalRows
return lastIndex
#===============================================================================
# 表 `websit_base_info` db 操作
#===============================================================================
def getAllWebsiteBaseInfo():
'''获取所有的站点的en_name与id信息'''
conn = getConnect()
curs = initClientEncode(conn)
sqlStr = 'SELECT en_name,id FROM `websit_base_info` ' #生成sql语句
curs.execute(sqlStr)
result = curs.fetchall()
curs.close()
conn.close()
return result
#===============================================================================
# 表 `cat_base_config` db 操作
#===============================================================================
def getAllCatBaseConfig():
'''获取所有的站点的en_name与id信息'''
conn = getConnect()
curs = initClientEncode(conn)
sqlStr = '''SELECT main_cat_id, baseinfo_table_name, priceinfo_cur_table_name, priceinfo_his_table_name,
en_name FROM `cat_base_config`''' #生成sql语句
curs.execute(sqlStr)
result = curs.fetchall()
curs.close()
conn.close()
return result
#===============================================================================
# 表 `prod_catagory` db proc
#===============================================================================
def saveProdCat(rawCatId, siteId, parentId, url, name, parentPath,
level, self_cat_id = 0,cat_base_id = 0 ):
''' 保存各站点的分类信息 '''
conn = getConnect()
curs = initClientEncode(conn)
if parentId == '':
print url
sqlStr = '''INSERT INTO `prod_catagory` (`raw_cat_id` ,`site_id` ,`parent_id` ,`url` ,`name` ,`parent_path` ,`level` , self_cat_id, cat_base_id,`update_time` )
VALUES ( %s, %s, %s, %s, %s, %s, %s, now()) '''
param = [seEncode(pt) for pt in (rawCatId, siteId, parentId, url, name, parentPath, level,self_cat_id, cat_base_id,)]
curs.execute(sqlStr, param)
curs.close()
conn.close()
return int(curs.lastrowid)
def match55bigoCats(site_id,name):
    '''Return level-3 categories (site 9 or *site_id*) whose name contains *name*.

    Security fix: *name* used to be interpolated into the SQL text with
    str.format, so a name containing quotes or % broke the query (SQL
    injection).  It is now bound as a normal parameter, with the LIKE
    wildcards added on the Python side; results are unchanged for
    well-behaved names.
    '''
    conn = getConnect()
    curs = initClientEncode(conn)
    sqlStr = '''SELECT id,site_id,self_cat_id,name,url,cat_base_id FROM `prod_catagory` where (site_id =9 or site_id=%s) AND LEVEL =3 AND name LIKE %s '''
    param = [site_id, '%' + name + '%']
    curs.execute(sqlStr, param)
    result = curs.fetchall()
    curs.close()
    conn.close()
    return result
def getCatIdFromRawCatID(raw_cat_id, site_id):
    '''Return (id, self_cat_id, cat_base_id) for the category, or None.'''
    connection = getConnect()
    cursor = initClientEncode(connection)
    cursor.execute('SELECT id,self_cat_id,cat_base_id FROM `prod_catagory` where raw_cat_id = %s and site_id = %s',
                   (raw_cat_id, site_id))
    row = cursor.fetchone()
    cursor.close()
    connection.close()
    # fetchone() already yields None on a miss, matching the old behaviour.
    return row
def getCatBySiteIdAndLevel(site_id,level):
    '''Return all (id, name, parent_path) rows for one site at one level.'''
    connection = getConnect()
    cursor = initClientEncode(connection)
    cursor.execute('SELECT id ,name,parent_path FROM `prod_catagory` where site_id = %s and level = %s',
                   (site_id, level))
    rows = cursor.fetchall()
    cursor.close()
    connection.close()
    return rows
#===============================================================================
# 表 `prod_base_info` db proc
#===============================================================================
def saveProdBaseInfo(conn,site_id , raw_id , name , url , img_url , repu, eval_num, cat_id,u_time):
    '''Insert one product row into `prod_base_info`; returns the new row id.'''
    cursor = initClientEncode(conn)
    sqlStr = '''INSERT INTO `prod_base_info` (`site_id` ,`raw_id` ,`name` ,`url` ,`img_url` , `repu`, `eval_num`, `cat_id` ,`u_time` )
                 VALUES (%s , %s, %s, %s, %s, %s , %s, %s , %s)'''
    values = [seEncode(field) for field in (site_id , raw_id , name , url , img_url , repu, eval_num, cat_id,u_time)]
    cursor.execute(sqlStr, values)
    conn.commit()
    newId = cursor.lastrowid
    cursor.close()
    return int(newId)
def batchSaveProdBaseInfo(conn, params):
    '''Bulk-insert product rows into `prod_base_info`.

    Each item of *params* carries the same nine fields, in the same order,
    as the saveProdBaseInfo() arguments.
    '''
    cursor = initClientEncode(conn)
    sqlStr = '''INSERT INTO `prod_base_info` (`site_id` ,`raw_id` ,`name` ,`url` ,`img_url` , `repu`, `eval_num`, `cat_id` ,`u_time` )
                 VALUES (%s , %s, %s, %s, %s, %s , %s, %s , %s)'''
    encoded = [[seEncode(field) for field in row] for row in params]
    cursor.executemany(sqlStr, encoded)
    conn.commit()
    cursor.close()
def getProdId(conn, site_id, raw_id):
    '''Return prod_base_info.id for (site_id, raw_id), or None when absent.'''
    cursor = initClientEncode(conn)
    cursor.execute('SELECT id FROM `prod_base_info` where site_id = %s and raw_id = %s',
                   (site_id, raw_id))
    row = cursor.fetchone()
    cursor.close()
    if row is not None:
        return row[0]
def getAllRawProdIdsBySite(site_id):
    '''Return all (raw_id, id) pairs from `prod_base_info` for one site.

    Fix: the bind argument is now a real one-element tuple; `(site_id)` is
    just a parenthesised scalar, which violates the DB-API (PEP 249)
    parameter contract and only worked by accident of the MySQLdb driver.
    '''
    conn = getConnect()
    curs = initClientEncode(conn)
    sqlStr = 'SELECT raw_id,id FROM `prod_base_info` where site_id = %s'
    curs.execute(sqlStr, (site_id,))
    result = curs.fetchall()
    curs.close()
    conn.close()
    return result
#===============================================================================
# 表 `prod_price_info` db proc
#===============================================================================
def saveProdPriceInfo(prod_id, real_price, cur_price, diff_price, adwords, coupon=0.00, ex_gift='',
                      order_cut=0.00, crash_cut=0.00, m_price=0.00, trans_price=0.00, other_dis=0.00, u_time = None):
    '''Insert one price snapshot into `prod_price_info`; returns the new row id.

    NOTE: the SQL column order differs from the argument order -- the
    param list below re-orders the values to match the columns; keep the
    two in sync when editing.
    '''
    conn = getConnect()
    curs = initClientEncode(conn)
    sqlStr = '''INSERT INTO `prod_price_info` (`prod_id` ,`real_price` ,`cur_price` ,`m_price` ,`diff_price` ,`trans_price`,`other_dis` ,
    `adwords` ,`coupon` ,`ex_gift` ,`order_cut` ,`crash_cut`,`u_time` ) VALUES (%s, %s, %s, %s, %s, %s,%s, %s, %s,%s, %s, %s,%s)'''
    #if isinstance(u_time, basestring):
    #    u_time = strptime(u_time,"%Y-%m-%d %H:%M:%S")
    param = [seEncode(pt) for pt in (prod_id, real_price, cur_price, m_price, diff_price, trans_price, other_dis, adwords, coupon, ex_gift, order_cut, crash_cut,u_time)]
    curs.execute(sqlStr, param)
    conn.commit()
    ret = curs.lastrowid
    curs.close()
    conn.close()
    return int(ret)
def batchSaveProdPriceInfo(conn, params):
    '''Bulk-insert price snapshots into `prod_price_info` (13 fields per row,
    in the column order of the INSERT below).'''
    cursor = initClientEncode(conn)
    sqlStr = '''INSERT INTO `prod_price_info` (`prod_id` ,`real_price` ,`cur_price` ,`m_price` ,`diff_price` ,`trans_price`,`other_dis` ,
    `adwords` ,`coupon` ,`ex_gift` ,`order_cut` ,`crash_cut`,`u_time` ) VALUES (%s, %s, %s, %s, %s, %s,%s, %s, %s,%s, %s, %s,%s)'''
    encoded = [[seEncode(field) for field in row] for row in params]
    cursor.executemany(sqlStr, encoded)
    conn.commit()
    cursor.close()
def getProdPriceInfoFromProdId(conn, prod_id):
    '''Return (real_price, cur_price, u_time, id) rows for one product,
    newest first, or None when it has no price history.

    Fix: the bind argument is now a real one-element tuple; `(prod_id)` is
    a parenthesised scalar, which violates the DB-API (PEP 249) parameter
    contract and only worked by accident of the MySQLdb driver.
    '''
    curs = initClientEncode(conn)
    sqlStr = '''select real_price,cur_price,u_time,id from `prod_price_info` where prod_id=%s order by u_time DESC'''
    curs.execute(sqlStr, (prod_id,))
    result = curs.fetchall()
    curs.close()
    if len(result) > 0:
        return result
def getMd5Key(src):
    '''Return the MD5 digest of *src* interpreted as a (large) integer.'''
    return int(hashlib.md5(src).hexdigest(), 16)
def getCatKey(url):
    '''First 16 decimal digits of the url's MD5 integer, as a string key.'''
    return str(getMd5Key(url))[:16]
class SiteNameIDDict(object):
    '''Singleton holding the site en_name -> id map from `websit_base_info`.'''
    def __init__(self):
        pass
    def __new__(cls):
        '''
        Singleton: the DB lookup table is loaded once, on first instantiation.
        '''
        if '_inst' not in vars(cls):
            cls.t_site_dict = dict(getAllWebsiteBaseInfo())
            cls._inst = super(SiteNameIDDict, cls).__new__(cls)
        return cls._inst
    def getSiteIdByName(self, siteName):
        # Raises KeyError for an unknown site name.
        return self.t_site_dict[siteName]
def getSiteIdByName(siteName):
    '''Module-level shortcut onto the SiteNameIDDict singleton.'''
    return SiteNameIDDict().getSiteIdByName(siteName)
class ProdCatIdDict(object):
    '''Singleton in-memory cache mapping (site, raw_cat_id) -> category row.'''
    def __init__(self):
        pass
    def __new__(cls):
        '''
        Singleton: the cache dict is created once, on first instantiation.
        '''
        if '_inst' not in vars(cls):
            cls.t_cat_dict = dict()
            cls._inst = super(ProdCatIdDict, cls).__new__(cls)
        return cls._inst
    def __getKey(self, siteId, rawCatId):
        # Cache key is "<siteId>_<rawCatId>".
        return '_'.join(map(str, (siteId, rawCatId)))
    def getProdCatId(self, siteId, catUrl):
        '''Resolve the category row for *catUrl*, caching DB hits.

        A None (miss) is stored but -- because the guard tests for None --
        is re-queried on every subsequent call; only real hits stick.
        '''
        rawCatId = getCatKey(catUrl)
        key = self.__getKey(siteId, rawCatId)
        value = self.t_cat_dict.get(key, None)
        if value is None:
            value = getCatIdFromRawCatID(rawCatId, siteId)
            self.t_cat_dict[key] = value
        return value
def getCatIdFromRawInfo(siteId, catUrl):
    '''Resolve *catUrl* to its prod_catagory id (first column), or None.'''
    row = ProdCatIdDict().getProdCatId(siteId, catUrl)
    if row:
        return row[0]
def getProdInfoRawIDMapId(siteId):
    '''Return a dict {raw_id: id} covering every product of *siteId*.'''
    pairs = getAllRawProdIdsBySite(siteId)
    return dict(pairs)
# Module-level fixture paths used by the ad-hoc test() helper below.
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')
def test(siteName, encoding='gb2312'):
    '''Ad-hoc loader: replay a 360buy spider log and bulk-insert the
    products it lists into prod_base_info, 100 rows per batch.

    NOTE(review): the dump path 'c:t360buy_jian.log' has no separator after
    the drive letter -- presumably 'c:/t360buy_jian.log' was intended;
    confirm before relying on that file.
    '''
    site_id = getSiteIdByName(siteName)
    fileName = os.path.join(testFilePath, '360buy_spider.log')
    conn = getConnect()
    params = list()
    i = 0
    # Raw ids already present in the DB; used to skip duplicates below.
    prodIds = set([ t[0] for t in getAllRawProdIdsBySite(site_id)])
    print len(prodIds)
    import re
    p = re.compile('[0-9]+')
    p1 = re.compile(u'下单')
    fOutput = open('c:t360buy_jian.log', 'w')
    with open(fileName, 'r') as fInput:
        for line in fInput:
            # Log lines are '|'-separated fields, gb2312-encoded by default.
            line = line.strip().decode(encoding, 'ignore')
            ret = line.split('|')
            raw_id = ret[0]
            # Dump field 6 for manual review when it contains both a digit
            # and the marker word matched by p1.
            if p.search(ret[6]) != None and p1.search(ret[6]) != None:
                fOutput.write(ret[0] + ' ' + ret[6] + '\n')
            #if getProdId(site_id,raw_id) != None:
            if raw_id in prodIds:
                #print '++++++++++++++++++++++++++'
                continue
            prodIds.add(raw_id)
            name = ret[3]
            repu = ret[4]
            eval_num = ret[5]
            url = ret[7]
            img_url = ret[8]
            # Last field is the repr of a sequence; its first item is the cat url.
            catUrl = eval(ret[-1])[0]
            cat_id = getCatIdFromRawInfo(site_id, catUrl)
            u_time = ret[2]
            #print raw_id , name , url , repu, eval_num, img_url ,cat_id
            param = (site_id , raw_id , name , url , img_url , repu, eval_num, cat_id,u_time)
            if cat_id == '':
                print param
            params.append(param)
            i = i + 1
            if i == 100:
                # Flush a full 100-row batch.
                batchSaveProdBaseInfo(conn, params)
                params = list()
                i = 0
    # Flush the final partial batch.
    if i > 0:
        batchSaveProdBaseInfo(conn, params)
    del params
    conn.close()
    fOutput.close()
def batchUpdateProdBaseInfo(conn, prodList):
    '''Bulk-update repu on prod_base_info_3c; each item is (repu, id).'''
    cursor = initClientEncode(conn)
    sqlPattern = '''update prod_base_info_3c set repu=%s where id = %s'''
    encoded = [[seEncode(field) for field in row] for row in prodList]
    cursor.executemany(sqlPattern, encoded)
    conn.commit()
    cursor.close()
if __name__ == '__main__':
    # NOTE(review): match55bigoCats is declared as (site_id, name) but is
    # called here with a single argument -- as written this raises TypeError.
    for t in match55bigoCats('笔记本'):
        print seEncode(t)
| [
[
1,
0,
0.0093,
0.0023,
0,
0.66,
0,
838,
0,
1,
0,
0,
838,
0,
0
],
[
1,
0,
0.0116,
0.0023,
0,
0.66,
0.0278,
688,
0,
1,
0,
0,
688,
0,
0
],
[
1,
0,
0.0162,
0.0023,
0,
... | [
"import MySQLdb",
"import os",
"import hashlib",
"def update360buyRepu():\n conn = getConnect()\n qPage = MySQLQueryPagination(conn=conn, numPerPage=2000)\n sqlStr = r'SELECT id,repu FROM `prod_base_info_3c` where site_id=6'\n for result in qPage.queryForList(sql=sqlStr):\n prodList = []\n ... |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-7-27
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from BeautifulSoup import BeautifulSoup, Comment
from pageparser import *
import itertools
import json
import os
import re
import urllib
import urlparse
import string
from enum import Enum
from spiderconfigparser import SpiderConfig
def translator(frm = '',to = '',delete = '',keep = None):
    '''Build a byte-string translate/delete function (Python 2 semantics).

    frm/to feed string.maketrans; when *keep* is given, every byte NOT in
    it is deleted instead.  The returned callable utf-8-encodes unicode
    input before translating.
    '''
    if len(to) == 1:
        to = to * len(frm)
    trans = string.maketrans(frm,to)
    if keep is not None:
        allchars = string.maketrans('','')
        # Deletion set = all bytes except those listed in *keep*.
        delete = allchars.translate(allchars,keep.translate(allchars,delete))
    def translate(s):
        if isinstance(s, unicode):
            s = s.encode('utf-8','ignore')
        return s.translate(trans,delete)
    return translate
# Callable that strips every non-digit byte from a string.
digits_only = translator(keep = string.digits)
# Root of the amazon.cn crawl tree (category level 0).
rootUrlSummary = ObuyUrlSummary(url=r'http://www.amazon.cn/', name='amazon', catagoryLevel=0)
class AmazonAllSortParser(RootCatagoryPageParser):
    '''
    Builds first-level ObuyUrlSummary objects from the search-alias
    dropdown on http://www.amazon.cn.
    '''
    mainHost = r'http://www.amazon.cn'
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def __getBaseSort1UrlSums(self):
        # Every <option> of the search dropdown is a first-level category;
        # the alias after '=' doubles as the category name.
        dropdown = self.soup.find(name='select',attrs={"id":"searchDropdownBox"})
        base_url = r'http://www.amazon.cn/s/ref=nb_sb_noss?__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&url={}&field-keywords=&x=20&y=15'
        summaries = []
        for option in dropdown.findAll(name='option'):
            alias = option['value']
            catName = alias.split('=')[-1]
            catUrl = base_url.format(urllib.quote(alias))
            summaries.append(self.buildSort_N(catUrl, catName, self.rootUrlSummary))
        return summaries
    def parserSubUrlSums(self):
        return self.filterUrlList(self.__getBaseSort1UrlSums())
class AmazonSortListParser(RootCatagoryPageParser):
    '''Extracts sub-category links from the refinements sidebar of an
    amazon.cn search-result page.'''
    mainHost = r'http://www.amazon.cn'
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonSortListParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def __isCat(self, catName):
        # True when the heading text contains the category marker word.
        return catName.find(u'类别') >= 0
    def __getSubUrlSums(self):
        '''Find the <ul> after the category <h2> and build one urlsum per link.

        NOTE(review): the loop deliberately uses catSeg after break; when no
        h2 matches, catSeg ends up as the *last* h2 (NameError when there is
        none) -- confirm the sidebar always carries a category heading.
        '''
        finalUrlList = []
        sort2 = self.soup.find(name='div', attrs={"id":"refinements"})
        #refId = 'ref_%s' % urllib.unquote(sort2['data-browseladder']).split(':')[-1]
        #allSort2Seg = sort2.find(name='ul',attrs={'id':refId})
        for catSeg in sort2(name='h2'):
            if self.__isCat(catSeg.getText().strip()):
                break
        allSort2Seg = catSeg.findNextSibling(name='ul')
        for t in allSort2Seg.findAll(name='a'):
            nameSeg = t.find(name='span',attrs={'class':'refinementLink'})
            if not nameSeg:
                # Anchors without a refinementLink span are not categories.
                continue
            #prodTotalNumSeg = t.find(name='span',attrs={'class':'narrowValue'})
            name = nameSeg.getText()
            #totalNum = prodTotalNumSeg.getText()
            #print digits_only(totalNum)
            url = t['href']
            url = ''.join((self.mainHost,url))
            sort_2_urlsum = self.buildSort_N(url, name, self.rootUrlSummary,
                                             isCrawle=True)
            finalUrlList.append(sort_2_urlsum);
        return finalUrlList
    def parserSubUrlSums(self):
        result = self.__getSubUrlSums()
        return self.filterUrlList(result)
class AmazonListFirstPageParser(Sort3PageParser):
    '''Parses an amazon.cn result-list page: one ProductDetails per
    result_N div, plus the ajax url of the next result page.'''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonListFirstPageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def parseProduct(self, prod):
        '''Build a ProductDetails from one result div; None when it carries
        no usable title anchor.  Missing price/rating/review segments fall
        back to '0.00' / '0'.'''
        titleSeg = prod.find( attrs={'class':'title'})
        if titleSeg is None:
            return
        if titleSeg.a is None:
            return
        pName, url = ParserUtils.parserTag_A(titleSeg.a)
        # The ASIN is the second-to-last path component of the product url.
        pid = url.split('/')[-2]
        url = 'http://www.amazon.cn/mn/detailApp?asin={}'.format(pid)
        priceSeg = prod.find(name='div', attrs={'class':'newPrice'})
        pastPrice = '0.00'
        currentPrice = '0.00'
        if priceSeg != None:
            currentPrice = ParserUtils.getPrice(priceSeg.span.getText())
            # <strike> carries the pre-discount price, when present.
            bypastSeg = priceSeg.strike
            if bypastSeg != None:
                pastPrice = ParserUtils.getPrice(bypastSeg.getText())
        imgUrl = ParserUtils.getImgUrl(prod.find(name='div',attrs={'class':'image'}))
        repuSeg = prod.find(name='div', attrs={'class':'stars'})
        reputation = '0'
        if repuSeg != None:
            reputation = ParserUtils.getDigit(repuSeg.img['alt'])
        evlSeg = prod.find(name='div', attrs={'class':'reviewsCount'})
        evaluateNum = '0'
        if evlSeg != None:
            evaluateNum = ParserUtils.getDigit(evlSeg.a.getText())
        prodDetail = ProductDetails(productId=pid, fullUrl=url,imageUrl=imgUrl,privPrice=currentPrice, pubPrice=pastPrice,
                                    reputation=reputation,evaluateNum=evaluateNum,name=pName, adWords='')
        prodDetail.catagory = self.rootUrlSummary
        return prodDetail
    def parserPageInfos(self):
        '''Collect products from the visible list and from the extra HTML
        Amazon hides inside the results-atf-next comment node.'''
        resultList = []
        soupRoot = self.soup
        for prod in soupRoot.findAll(name='div',attrs={'id':re.compile(r'result_[0-9]+')}):
            prodDetail = self.parseProduct(prod)
            if prodDetail != None:
                resultList.append(prodDetail)
        resultsAtfNextSeg = self.soup.find(attrs = {'id':'results-atf-next'})
        if resultsAtfNextSeg != None:
            # The next screenful ships inside an HTML comment; re-parse it.
            resultsAtfNext = resultsAtfNextSeg.find(text=lambda text:isinstance(text, Comment))
            spt = BeautifulSoup(resultsAtfNext,convertEntities = BeautifulSoup.HTML_ENTITIES)
            for prod in spt.findAll(name='div',attrs={'id':re.compile(r'result_[0-9]+')}):
                prodDetail = self.parseProduct(prod)
                if prodDetail != None:
                    resultList.append(prodDetail)
        return resultList
    def __nextPagePattern(self):
        # NOTE(review): '§ion=' below looks like '&section=' collapsed by an
        # HTML-entity pass ('&sect' -> '§') -- confirm against a live request.
        # return r'http://www.amazon.cn/mn/search/ajax/{}&tab={}&pageTypeID={}&fromHash=§ion=BTF&fromApp=undefined&fromPage=undefined&version=2'
        return r'http://www.amazon.cn/mn/search/ajax/{}&pageTypeID={}&fromHash=§ion=BTF&fromApp=undefined&fromPage=undefined&version=2'
    def __getNextPageUrl(self):
        '''Derive the ajax next-page url from the pagnNextLink anchor, or None.'''
        nextPageSeg = self.soup.find(attrs={'id':'pagnNextLink'})
        fullUrl = None
        if nextPageSeg != None:
            name,url = ParserUtils.parserTag_A(nextPageSeg)
            t= urlparse.urlparse(url)
            qsDict = urlparse.parse_qs(t.query)
            # pageTypeID is the last node id in the rh= browse ladder.
            pageTypeID = qsDict['rh'][0].split(',')[-1].split(':')[-1]
            ref = url.replace(r'/gp/search/','')
            #tab = self.rootUrlSummary.parentPath[1].name
            fullUrl = self.__nextPagePattern().format(ref,pageTypeID)
        return fullUrl
    def parserSubUrlSums(self):
        '''Return [next-page urlsum], or [] at the last page or the
        configured SpiderConfig page cap.'''
        nextPageUrl = self.__getNextPageUrl()
        if nextPageUrl is None:
            return []
        else:
            query = urlparse.urlparse(nextPageUrl).query
            pageNum = urlparse.parse_qs(query)['page'][0]
            if(int(pageNum) >= SpiderConfig.getMaxPage()):
                return []
            urlSum = self.buildSort_4(nextPageUrl)
            return [urlSum]
class AmazonNextPageJsonParser(Parser):
    '''
    Parses the '&&&'-separated JSON fragments amazon.cn returns for ajax
    next-page requests; the embedded HTML payloads are delegated to
    AmazonListFirstPageParser.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonNextPageJsonParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
        # Normalise whitespace per fragment, drop empties, decode each one,
        # and stash the three payload sections we care about.
        fragments = [' '.join(seg.split()).strip() for seg in self.dataStr.split('&&&')]
        for jsonObj in [json.loads(seg) for seg in fragments if seg != '']:
            if 'pagination' in jsonObj:
                self.pageNextSeg = jsonObj['pagination']['data']['value']
            if 'results-btf' in jsonObj:
                self.resultsBtf = jsonObj['results-btf']['data']['value']
            if 'results-atf-next' in jsonObj:
                self.resultsAtf = jsonObj['results-atf-next']['data']['value']
    def parserPageInfos(self):
        '''Products from both the below-the-fold and next-atf HTML payloads.'''
        btf = AmazonListFirstPageParser(self.resultsBtf, self.rootUrlSummary).parserPageInfos()
        atf = AmazonListFirstPageParser(self.resultsAtf, self.rootUrlSummary).parserPageInfos()
        return list(itertools.chain(btf, atf))
    def parserSubUrlSums(self):
        '''Next-page url summaries parsed out of the pagination payload.'''
        return AmazonListFirstPageParser(self.pageNextSeg, self.rootUrlSummary).parserSubUrlSums()
# Maps catagoryLevel -> parser class the spider uses for that level.
parserDict = {0:AmazonAllSortParser, 1:AmazonSortListParser, 2:AmazonSortListParser, 3:AmazonListFirstPageParser, 4:AmazonNextPageJsonParser}
''' test '''
# Fixture paths for the manual test helpers below.
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')
def testAllSortPage():
    '''Manual check: parse the saved amazon.html fixture, print level-1 urls.'''
    fileName = os.path.join(testFilePath, 'amazon.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    rootUrlSum = ObuyUrlSummary(url=r'http://www.amazon.cn/', name='Amazon')
    exclude = [ ObuyUrlSummary(name=name) for name in [u'video',u'aps',u'stripbooks',u'music',u'apparel',u'electronics']]
    include = [ ObuyUrlSummary(name=name) for name in [u'video',u'aps']]
    firstPage = AmazonAllSortParser(content, rootUrlSum, include = None,exclude=exclude)
    for sort_1 in firstPage.parserSubUrlSums():
        #for index, urlsum in enumerate(sort_3.parentPath):
            #print '\t' * index, str(urlsum.getUrlSumAbstract())
        print sort_1.url , sort_1.catagoryLevel
def testSort1Page():
    '''Manual check: parse the toys_games.html fixture, print level-2 urls.'''
    fileName = os.path.join(testFilePath, 'toys_games.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_1_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=nb_sb_noss?__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&url=search-alias%3Dtoys-and-games&field-keywords=&x=16&y=14',
                                   parentPath=[('test')], catagoryLevel=1)
    sort2Page = AmazonSortListParser(content, sort_1_urlsum)
    for sort_2 in sort2Page.parserSubUrlSums():
        print sort_2.url
def testSort2Page():
    '''Manual check: parse a saved list page, print next-page urls and products.'''
    fileName = os.path.join(testFilePath, 'amazon_2011-10-09_20-00-22.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    rootObuyUrlSummary = ObuyUrlSummary(url=r'http://www.amazon.cn/',parentPath=[], catagoryLevel=0)
    sort_1_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=nb_sb_noss?__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&url=search-alias%3Dtoys-and-games&field-keywords=&x=16&y=14',
                                   parentPath=[rootObuyUrlSummary], catagoryLevel=1)
    sort_1_urlsum.name = 'toys-and-games'
    sort_2_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/gp/search/ref=sr_nr_n_0?rh=n%3A647070051%2Cn%3A!647071051%2Cn%3A1982054051&bbn=647071051&ie=UTF8&qid=1313051441&rnid=647071051',
                                   parentPath=[rootObuyUrlSummary,sort_1_urlsum], catagoryLevel=2)
    sort2Page = AmazonListFirstPageParser(content, sort_2_urlsum)
    for sort_2 in sort2Page.parserSubUrlSums():
        print sort_2.url
    for product in sort2Page.parserPageInfos():
        print product.logstr()
def deepSort3Page():
    '''Manual check against the LIVE site: fetch one level-2 page and print
    the sub-category urls it yields.'''
    from crawlerhttp import getContentFromUrlSum
    rootObuyUrlSummary = ObuyUrlSummary(url=r'http://www.amazon.cn/',parentPath=[], catagoryLevel=0)
    sort_2_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/gp/search/ref=sr_ex_n_1?rh=n%3A814224051%2Cn%3A814227051%2Cn%3A98519071&bbn=98519071&ie=UTF8&qid=1322031024',
                                   parentPath=[rootObuyUrlSummary], catagoryLevel=2)
    content = getContentFromUrlSum(sort_2_urlsum)
    parser = AmazonSortListParser(content, sort_2_urlsum)
    for urlsum in parser.parserSubUrlSums():
        print urlsum.name,urlsum.url
def testSort3Details():
    '''Manual check: parse a saved ajax-JSON fixture, print products and
    next-page urls.'''
    fileName = os.path.join(testFilePath, 'amazon_2011-10-09_20-08-17.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    rootObuyUrlSummary = ObuyUrlSummary(url=r'http://www.amazon.cn/',parentPath=[], catagoryLevel=0)
    sort_1_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=nb_sb_noss?__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&url=search-alias%3Dtoys-and-games&field-keywords=&x=16&y=14',
                                   parentPath=[rootObuyUrlSummary], catagoryLevel=1)
    sort_1_urlsum.name = 'toys-and-games'
    sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/gp/search/ref=sr_nr_n_0?rh=n%3A647070051%2Cn%3A!647071051%2Cn%3A1982054051&bbn=647071051&ie=UTF8&qid=1313051441&rnid=647071051',
                                   parentPath=[rootObuyUrlSummary,sort_1_urlsum], catagoryLevel=2)
    sort3Page = AmazonNextPageJsonParser(content, sort_3_urlsum)
    for product in sort3Page.parserPageInfos():
        print product.logstr()
    for sort_3 in sort3Page.parserSubUrlSums():
        print sort_3.url
def testComment():
fileName = os.path.join(testFilePath, 'computer.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
soup = BeautifulSoup(content)
comments = soup.findAll(text=lambda text:isinstance(text, Comment))
for comment in comments:
print comment.extract()
def testJson():
    '''Manual check: walk the '&&&'-separated JSON fragments of a saved ajax
    response and print the products in each HTML payload.'''
    import json
    fileName = os.path.join(testFilePath, 'amazon_2011-10-09_20-06-28.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    # Same fragment normalisation as AmazonNextPageJsonParser.__init__.
    segList = content.split('&&&')
    segList = [' '.join(seg.split()).strip() for seg in segList]
    segList = filter(lambda seg:seg != '',segList)
    jSonObjs = [json.loads(seg) for seg in segList ]
    for jsonObj in jSonObjs:
        if jsonObj.has_key('pagination'):
            print jsonObj['pagination']['data']['value']
        if jsonObj.has_key('results-btf'):
            print '+++++++++++++++++'
            jsonRet = jsonObj['results-btf']['data']['value']
            sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
                                           parentPath=[('test')], catagoryLevel=3)
            sort3Page = AmazonListFirstPageParser(jsonRet, sort_3_urlsum)
            for product in sort3Page.parserPageInfos():
                print product.logstr()
        elif jsonObj.has_key('results-atf-next'):
            print '--------------'
            jsonRet = jsonObj['results-atf-next']['data']['value']
            from BeautifulSoup import BeautifulSoup, Comment
            # The product HTML sits inside an HTML comment node.
            soup = BeautifulSoup(jsonRet)
            comment = soup.find(attrs={'id':'results-atf-next'},text=lambda text:isinstance(text, Comment))
            sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
                                           parentPath=[('test')], catagoryLevel=3)
            sort3Page = AmazonListFirstPageParser(comment.extract(), sort_3_urlsum)
            for product in sort3Page.parserPageInfos():
                print product.logstr()
if __name__ == '__main__':
    # Manual smoke run against the saved ajax-JSON fixture.
    testSort3Details()
| [
[
8,
0,
0.0195,
0.018,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0329,
0.003,
0,
0.66,
0.0333,
878,
0,
2,
0,
0,
878,
0,
0
],
[
1,
0,
0.0359,
0.003,
0,
0.66,
... | [
"'''\nCreated on 2011-7-27\n主要用于从网站上爬取信息后,抽取页面信息;\n\n@author: zhongfeng\n'''",
"from BeautifulSoup import BeautifulSoup, Comment",
"from pageparser import *",
"import itertools",
"import json",
"import os",
"import re",
"import urllib",
"import urlparse",
"import string",
"from enum import Enum"... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from pageparser import ObuyUrlSummary
from amazon.amazonpageparser import parserDict,rootUrlSummary
from spider import ObuySpider,main
import os,sys
from logfacade import LoggerFactory
class AmazonSpider(ObuySpider):
    '''Spider for amazon.cn: seeds the crawl queue from an amazon.cat file
    and routes parser results by category level.'''
    def __init__(self, rootUrlSummary=None, parserDict=None, threadNum=5,
                 procDetails=True, include=None, exclude=None, rootPageResult=None,):
        super(AmazonSpider, self).__init__(rootUrlSummary, parserDict, threadNum,
                                           procDetails, include, exclude, rootPageResult)
    def init_urls(self):
        '''Seed requests from amazon.cat next to the script; each line is
        "name,url,level" in utf-8.'''
        curPath = os.path.abspath(os.path.dirname(sys.argv[0]))
        catFile = os.path.join(curPath,'amazon.cat')
        with open(catFile) as f:
            for line in f:
                name,url,level = [t.decode('utf-8') for t in line.split(',')]
                self.putSpideRequest(ObuyUrlSummary(name=name,url=url,catagoryLevel=int(level)))
    def procParserResult(self, result, urlsum, parser):
        '''Queue sub-urls from *parser*; a level-2 page with no sub-urls is
        requeued as level 3, then product details are processed.'''
        if urlsum.catagoryLevel == 3:# marks a final (leaf) page
            urlsum.parent = urlsum
        parserResult = parser.parserSubUrlSums()
        if parserResult:
            for subUrlSum in parserResult:
                self.putSpideRequest(subUrlSum)
        else:
            # No sub-pages found: retry a level-2 page as a level-3 list page.
            if urlsum.catagoryLevel == 2:
                urlsum.catagoryLevel = 3
                self.putSpideRequest(urlsum)
        self.procPageInfos(parser,urlsum)
if __name__ == '__main__':
    # Entry point: run the spider with the amazon level->parser table.
    main(root=rootUrlSummary,parserDict=parserDict,SpiderClass = AmazonSpider)
| [
[
8,
0,
0.1395,
0.1163,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2093,
0.0233,
0,
0.66,
0.1429,
488,
0,
1,
0,
0,
488,
0,
0
],
[
1,
0,
0.2326,
0.0233,
0,
0.66... | [
"'''\nCreated on 2011-8-2\n\n@author: zhongfeng\n'''",
"from pageparser import ObuyUrlSummary",
"from amazon.amazonpageparser import parserDict,rootUrlSummary",
"from spider import ObuySpider,main",
"import os,sys",
"from logfacade import LoggerFactory",
"class AmazonSpider(ObuySpider):\n def __init_... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from pageparser import ObuyUrlSummary
from amazon.amazonpageparser import parserDict,rootUrlSummary
from spider import ObuySpider,main
import os,sys
from logfacade import LoggerFactory
class AmazonSpider(ObuySpider):
    '''Spider for amazon.cn: seeds the crawl queue from an amazon.cat file
    and routes parser results by category level.'''
    def __init__(self, rootUrlSummary=None, parserDict=None, threadNum=5,
                 procDetails=True, include=None, exclude=None, rootPageResult=None,):
        super(AmazonSpider, self).__init__(rootUrlSummary, parserDict, threadNum,
                                           procDetails, include, exclude, rootPageResult)
    def init_urls(self):
        '''Seed requests from amazon.cat next to the script; each line is
        "name,url,level" in utf-8.'''
        curPath = os.path.abspath(os.path.dirname(sys.argv[0]))
        catFile = os.path.join(curPath,'amazon.cat')
        with open(catFile) as f:
            for line in f:
                name,url,level = [t.decode('utf-8') for t in line.split(',')]
                self.putSpideRequest(ObuyUrlSummary(name=name,url=url,catagoryLevel=int(level)))
    def procParserResult(self, result, urlsum, parser):
        '''Queue sub-urls from *parser*; a level-2 page with no sub-urls is
        requeued as level 3, then product details are processed.'''
        if urlsum.catagoryLevel == 3:# marks a final (leaf) page
            urlsum.parent = urlsum
        parserResult = parser.parserSubUrlSums()
        if parserResult:
            for subUrlSum in parserResult:
                self.putSpideRequest(subUrlSum)
        else:
            # No sub-pages found: retry a level-2 page as a level-3 list page.
            if urlsum.catagoryLevel == 2:
                urlsum.catagoryLevel = 3
                self.putSpideRequest(urlsum)
        self.procPageInfos(parser,urlsum)
if __name__ == '__main__':
    # Entry point: run the spider with the amazon level->parser table.
    main(root=rootUrlSummary,parserDict=parserDict,SpiderClass = AmazonSpider)
| [
[
8,
0,
0.1395,
0.1163,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2093,
0.0233,
0,
0.66,
0.1429,
488,
0,
1,
0,
0,
488,
0,
0
],
[
1,
0,
0.2326,
0.0233,
0,
0.66... | [
"'''\nCreated on 2011-8-2\n\n@author: zhongfeng\n'''",
"from pageparser import ObuyUrlSummary",
"from amazon.amazonpageparser import parserDict,rootUrlSummary",
"from spider import ObuySpider,main",
"import os,sys",
"from logfacade import LoggerFactory",
"class AmazonSpider(ObuySpider):\n def __init_... |
#/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-7-27
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from pageparser import *
import re
from copy import deepcopy
class AmazonAllSortParser(RootCatagoryPageParser):
    '''
    Extracts the category tree from http://www.amazon.cn/gp/site-directory
    and turns it into ObuyUrlSummary objects.

    Repair: this copy of the file was corrupted in transit -- the method
    header of __getBaseSort2UrlSums and the final
    `return self.filterUrlList(result)` line were garbled into invalid
    syntax.  Both are reconstructed here, following the
    parserSubUrlSums -> filterUrlList pattern used by every sibling parser
    class; verify against the original source if available.
    '''
    mainHost = r'http://www.amazon.cn'
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def __getBaseSort2UrlSums(self):
        finalUrlList = []
        allSort = self.soup.find(attrs={"id":"siteDirectory"})
        for t in allSort.findAll(name='div', attrs={"class":"popover-grouping"}):  # first-level category
            name = t.find(name='div', attrs={"class":"popover-category-name"}).h2.getText()
            url = ''.join((self.mainHost, name))
            sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=False)
            sort_2 = t.findNextSiblings(name='div')
            for tt in sort_2:  # second-level category
                name, url = ParserUtils.parserTag_A(tt.a)
                url = ''.join((self.mainHost,url))
                # Skip the catch-all "all ..." links.
                if name.startswith(u'所有'):
                    continue
                sort_2_urlsum = self.buildSort_N(url, name, sort_1_urlsum, isCrawle=True)
                finalUrlList.append(sort_2_urlsum)
        return finalUrlList
    def parserSubUrlSums(self):
        result = self.__getBaseSort2UrlSums()
        return self.filterUrlList(result)
class AmazonSort2Parser(RootCatagoryPageParser):
    '''
    Extracts sub-category links from a second-level amazon.cn page
    (early variant of the site-directory parser).
    '''
    mainHost = r'http://www.amazon.cn'
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonSort2Parser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def __isCat(self, catName):
        # True when the heading text contains the category marker word.
        return catName.find(u'分类') >= 0
    def getBaseSort3UrlSums(self):
        '''Collect sub-category url summaries from every blurb widget.

        NOTE(review): the `break` fires when a widget's heading matches the
        category marker, so that widget itself is never processed and later
        widgets are skipped -- confirm this is the intended behaviour.
        '''
        finalUrlList = []
        allSort3 = self.soup.findAll(name='div', attrs={"class":"unified_widget blurb"})
        for alls3 in allSort3:
            if self.__isCat(alls3.h2.getText()):
                break
            for t in alls3.findAll(name='div',attrs={'class':'title'}):
                name, url = ParserUtils.parserTag_A(t.a)
                url = ''.join((self.mainHost,url))
                sort_2_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=True)
                finalUrlList.append(sort_2_urlsum);
        return finalUrlList
class AmazonSort3PageParser(Sort3PageParser):
    '''
    Parses a third-level amazon.cn list page (early variant): product rows
    plus the next-page link.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def parserPageInfos(self):
        '''One ProductDetails per result_N div; the title text doubles as
        the product id in this variant.'''
        resultList = []
        for prod in self.soup.findAll(name='div',attrs={'id':re.compile(r'result_[0-9]+')}):
            pName, url = ParserUtils.parserTag_A(prod.find(name='div', attrs={'class':'title'}).a)
            pid = pName
            currentPrice = ParserUtils.getPrice(prod.find(name='div',attrs={'class':'newPrice'}).span.getText())
            # <strike> carries the pre-discount price, when present.
            bypastSeg = prod.find(name='div',attrs={'class':'newPrice'}).strike
            pastPrice = '0.00'
            if bypastSeg != None:
                pastPrice = ParserUtils.getPrice(bypastSeg.getText())
            prodDetail = ProductDetails(productId=pid, privPrice=currentPrice, pubPrice=pastPrice,
                                        name=pName, adWords='')
            resultList.append(prodDetail)
        return resultList
    def __getNextPageUrl(self):
        '''Rebuild the next-page url by grafting the pagnNextLink href onto
        the current base url (everything before '#'); None when absent.'''
        nextPageSeg = self.soup.find(attrs={'id':'pagnNextLink'})
        fullUrl = None
        if nextPageSeg != None:
            name,url = ParserUtils.parserTag_A(nextPageSeg)
            print url
            url = url.replace(r'/gp/search','#')
            baseUrl = self.rootUrlSummary.url.rsplit('#')[0]
            fullUrl = ''.join((baseUrl,url))
        return fullUrl
    def parserSubUrlSums(self):
        '''Return [next-page urlsum] (deep copy of the current summary) or [].'''
        result = self.__getNextPageUrl()
        if result is None:
            return []
        else:
            urlSum = deepcopy(self.rootUrlSummary)
            urlSum.url = result
            return [urlSum]
''' test '''
import os
# Fixture paths for the manual test helpers below.
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')
def testAllSortPage():
    '''Manual check: parse the amazonSite.html fixture, print level-2 urls.'''
    fileName = os.path.join(testFilePath, 'amazonSite.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    rootUrlSum = ObuyUrlSummary(url=r'http://www.amazon.cn/gp/site-directory/ref=topnav_sad', name='Amazon')
    # NOTE(review): doubled scheme 'http://http://' below; the variable is
    # unused in this run (include=None is passed), so it is harmless as-is.
    include = [ ObuyUrlSummary(url=r'http://http://www.newegg.com.cn/Category/536.htm',
                               name='服务器', catagoryLevel=2)]
    firstPage = AmazonAllSortParser(content, rootUrlSum, include=None)
    for sort_2 in firstPage.parserSubUrlSums():
        #for index, urlsum in enumerate(sort_3.parentPath):
            #print '\t' * index, str(urlsum.getUrlSumAbstract())
        print sort_2.url , sort_2.catagoryLevel
def testSort2Page():
    '''Manual check: parse the 888465051.html fixture, print sub-category urls.'''
    fileName = os.path.join(testFilePath, '888465051.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_2_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/%E7%94%B5%E8%84%91%E5%8F%8A%E9%85%8D%E4%BB%B6/b/ref=sd_allcat_pc?ie=UTF8&node=888465051',
                                   parentPath=[('test')], catagoryLevel=2)
    sort3Page = AmazonSort2Parser(content, sort_2_urlsum)
    for sort_3 in sort3Page.parserSubUrlSums():
        print sort_3.url
def testSort3Page():
    '''Manual check: parse the computer.html fixture, print next-page urls.'''
    fileName = os.path.join(testFilePath, 'computer.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
                                   parentPath=[('test')], catagoryLevel=3)
    sort3Page = AmazonSort3PageParser(content, sort_3_urlsum)
    for sort_3 in sort3Page.parserSubUrlSums():
        print sort_3.url
def testSort3Details():
    '''Manual check: parse the computer.html fixture, print product details.'''
    fileName = os.path.join(testFilePath, 'computer.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
                                   parentPath=[('test')], catagoryLevel=3)
    sort3Page = AmazonSort3PageParser(content, sort_3_urlsum)
    for product in sort3Page.parserPageInfos():
        print product.logstr()
def testComment():
from BeautifulSoup import BeautifulSoup, Comment
fileName = os.path.join(testFilePath, 'computer.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
soup = BeautifulSoup(content)
comments = soup.findAll(text=lambda text:isinstance(text, Comment))
for comment in comments:
print comment.extract()
def testJson():
import json
fileName = os.path.join(testFilePath, 'watch_json.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
segList = content.split('&&&')
segList = [' '.join(seg.split()).strip() for seg in segList]
segList = filter(lambda seg:seg != '',segList)
jSonObjs = [json.loads(seg) for seg in segList ]
for jsonObj in jSonObjs:
if jsonObj.has_key('results-btf'):
print '+++++++++++++++++'
jsonRet = jsonObj['results-btf']['data']['value']
sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
parentPath=[('test')], catagoryLevel=3)
sort3Page = AmazonSort3PageParser(jsonRet, sort_3_urlsum)
for product in sort3Page.parserPageInfos():
print product.logstr()
elif jsonObj.has_key('results-atf-next'):
print '--------------'
jsonRet = jsonObj['results-atf-next']['data']['value']
from BeautifulSoup import BeautifulSoup, Comment
soup = BeautifulSoup(jsonRet)
comment = soup.find(attrs={'id':'results-atf-next'},text=lambda text:isinstance(text, Comment))
sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
parentPath=[('test')], catagoryLevel=3)
sort3Page = AmazonSort3PageParser(comment.extract(), sort_3_urlsum)
for product in sort3Page.parserPageInfos():
print product.logstr()
if __name__ == '__main__':
#testAllSortPage()
#testSort2Page()
#testSort3Page()
#testSort3Details()
#testComment()
testJson()
| [
[
8,
0,
0.0333,
0.0333,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0571,
0.0048,
0,
0.66,
0.0556,
488,
0,
1,
0,
0,
488,
0,
0
],
[
1,
0,
0.0619,
0.0048,
0,
0.66... | [
"'''\nCreated on 2011-7-27\n\n主要用于从网站上爬取信息后,抽取页面信息;\n\n@author: zhongfeng\n'''",
"from pageparser import *",
"import re",
"from copy import deepcopy",
"class AmazonAllSortParser(RootCatagoryPageParser):\n '''\n 从http://www.amazon.cn/gp/site-directory获取所有的分类信息,\n 组合成ObuyUrlSummary\n ... |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-7-27
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from BeautifulSoup import BeautifulSoup, Comment
from pageparser import *
import itertools
import json
import os
import re
import urllib
import urlparse
import string
from enum import Enum
from spiderconfigparser import SpiderConfig
def translator(frm = '',to = '',delete = '',keep = None):
if len(to) == 1:
to = to * len(frm)
trans = string.maketrans(frm,to)
if keep is not None:
allchars = string.maketrans('','')
delete = allchars.translate(allchars,keep.translate(allchars,delete))
def translate(s):
if isinstance(s, unicode):
s = s.encode('utf-8','ignore')
return s.translate(trans,delete)
return translate
digits_only = translator(keep = string.digits)
rootUrlSummary = ObuyUrlSummary(url=r'http://www.amazon.cn/', name='amazon', catagoryLevel=0)
class AmazonAllSortParser(RootCatagoryPageParser):
'''
从http://www.amazon.cn获取所有的分类信息,
组合成ObuyUrlSummary
'''
mainHost = r'http://www.amazon.cn'
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(AmazonAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def __getBaseSort1UrlSums(self):
finalUrlList = []
allSort = self.soup.find(name='select',attrs={"id":"searchDropdownBox"})
base_url = r'http://www.amazon.cn/s/ref=nb_sb_noss?__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&url={}&field-keywords=&x=20&y=15'
for t in allSort.findAll(name='option'):#一级分类
searchAias = t['value']
name = searchAias.split('=')[-1]
url = base_url.format(urllib.quote(searchAias))
sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary)
finalUrlList.append(sort_1_urlsum);
return finalUrlList
def parserSubUrlSums(self):
result = self.__getBaseSort1UrlSums()
return self.filterUrlList(result)
class AmazonSortListParser(RootCatagoryPageParser):
mainHost = r'http://www.amazon.cn'
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(AmazonSortListParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def __isCat(self, catName):
return catName.find(u'类别') >= 0
def __getSubUrlSums(self):
finalUrlList = []
sort2 = self.soup.find(name='div', attrs={"id":"refinements"})
#refId = 'ref_%s' % urllib.unquote(sort2['data-browseladder']).split(':')[-1]
#allSort2Seg = sort2.find(name='ul',attrs={'id':refId})
for catSeg in sort2(name='h2'):
if self.__isCat(catSeg.getText().strip()):
break
allSort2Seg = catSeg.findNextSibling(name='ul')
for t in allSort2Seg.findAll(name='a'):
nameSeg = t.find(name='span',attrs={'class':'refinementLink'})
if not nameSeg:
continue
#prodTotalNumSeg = t.find(name='span',attrs={'class':'narrowValue'})
name = nameSeg.getText()
#totalNum = prodTotalNumSeg.getText()
#print digits_only(totalNum)
url = t['href']
url = ''.join((self.mainHost,url))
sort_2_urlsum = self.buildSort_N(url, name, self.rootUrlSummary,
isCrawle=True)
finalUrlList.append(sort_2_urlsum);
return finalUrlList
def parserSubUrlSums(self):
result = self.__getSubUrlSums()
return self.filterUrlList(result)
class AmazonListFirstPageParser(Sort3PageParser):
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(AmazonListFirstPageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def parseProduct(self, prod):
titleSeg = prod.find( attrs={'class':'title'})
if titleSeg is None:
return
if titleSeg.a is None:
return
pName, url = ParserUtils.parserTag_A(titleSeg.a)
pid = url.split('/')[-2]
url = 'http://www.amazon.cn/mn/detailApp?asin={}'.format(pid)
priceSeg = prod.find(name='div', attrs={'class':'newPrice'})
pastPrice = '0.00'
currentPrice = '0.00'
if priceSeg != None:
currentPrice = ParserUtils.getPrice(priceSeg.span.getText())
bypastSeg = priceSeg.strike
if bypastSeg != None:
pastPrice = ParserUtils.getPrice(bypastSeg.getText())
imgUrl = ParserUtils.getImgUrl(prod.find(name='div',attrs={'class':'image'}))
repuSeg = prod.find(name='div', attrs={'class':'stars'})
reputation = '0'
if repuSeg != None:
reputation = ParserUtils.getDigit(repuSeg.img['alt'])
evlSeg = prod.find(name='div', attrs={'class':'reviewsCount'})
evaluateNum = '0'
if evlSeg != None:
evaluateNum = ParserUtils.getDigit(evlSeg.a.getText())
prodDetail = ProductDetails(productId=pid, fullUrl=url,imageUrl=imgUrl,privPrice=currentPrice, pubPrice=pastPrice,
reputation=reputation,evaluateNum=evaluateNum,name=pName, adWords='')
prodDetail.catagory = self.rootUrlSummary
return prodDetail
def parserPageInfos(self):
resultList = []
soupRoot = self.soup
for prod in soupRoot.findAll(name='div',attrs={'id':re.compile(r'result_[0-9]+')}):
prodDetail = self.parseProduct(prod)
if prodDetail != None:
resultList.append(prodDetail)
resultsAtfNextSeg = self.soup.find(attrs = {'id':'results-atf-next'})
if resultsAtfNextSeg != None:
resultsAtfNext = resultsAtfNextSeg.find(text=lambda text:isinstance(text, Comment))
spt = BeautifulSoup(resultsAtfNext,convertEntities = BeautifulSoup.HTML_ENTITIES)
for prod in spt.findAll(name='div',attrs={'id':re.compile(r'result_[0-9]+')}):
prodDetail = self.parseProduct(prod)
if prodDetail != None:
resultList.append(prodDetail)
return resultList
def __nextPagePattern(self):
# return r'http://www.amazon.cn/mn/search/ajax/{}&tab={}&pageTypeID={}&fromHash=§ion=BTF&fromApp=undefined&fromPage=undefined&version=2'
return r'http://www.amazon.cn/mn/search/ajax/{}&pageTypeID={}&fromHash=§ion=BTF&fromApp=undefined&fromPage=undefined&version=2'
def __getNextPageUrl(self):
nextPageSeg = self.soup.find(attrs={'id':'pagnNextLink'})
fullUrl = None
if nextPageSeg != None:
name,url = ParserUtils.parserTag_A(nextPageSeg)
t= urlparse.urlparse(url)
qsDict = urlparse.parse_qs(t.query)
pageTypeID = qsDict['rh'][0].split(',')[-1].split(':')[-1]
ref = url.replace(r'/gp/search/','')
#tab = self.rootUrlSummary.parentPath[1].name
fullUrl = self.__nextPagePattern().format(ref,pageTypeID)
return fullUrl
def parserSubUrlSums(self):
nextPageUrl = self.__getNextPageUrl()
if nextPageUrl is None:
return []
else:
query = urlparse.urlparse(nextPageUrl).query
pageNum = urlparse.parse_qs(query)['page'][0]
if(int(pageNum) >= SpiderConfig.getMaxPage()):
return []
urlSum = self.buildSort_4(nextPageUrl)
return [urlSum]
class AmazonNextPageJsonParser(Parser):
'''
Sort3Json解析类
'''
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(AmazonNextPageJsonParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
segList = self.dataStr.split('&&&')
segList = [' '.join(seg.split()).strip() for seg in segList]
segList = filter(lambda seg:seg != '',segList)
jSonObjs = [json.loads(seg) for seg in segList ]
for jsonObj in jSonObjs:
if jsonObj.has_key('pagination'):
self.pageNextSeg = jsonObj['pagination']['data']['value']
if jsonObj.has_key('results-btf'):
self.resultsBtf = jsonObj['results-btf']['data']['value']
if jsonObj.has_key('results-atf-next'):
self.resultsAtf = jsonObj['results-atf-next']['data']['value']
def parserPageInfos(self):
result = []
retBtf = AmazonListFirstPageParser(self.resultsBtf,self.rootUrlSummary).parserPageInfos()
retAtf = AmazonListFirstPageParser(self.resultsAtf,self.rootUrlSummary).parserPageInfos()
result.extend(itertools.chain(retBtf,retAtf))
return result
def parserSubUrlSums(self):
return AmazonListFirstPageParser(self.pageNextSeg,self.rootUrlSummary).parserSubUrlSums()
parserDict = {0:AmazonAllSortParser, 1:AmazonSortListParser, 2:AmazonSortListParser, 3:AmazonListFirstPageParser, 4:AmazonNextPageJsonParser}
''' test '''
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')
def testAllSortPage():
fileName = os.path.join(testFilePath, 'amazon.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
rootUrlSum = ObuyUrlSummary(url=r'http://www.amazon.cn/', name='Amazon')
exclude = [ ObuyUrlSummary(name=name) for name in [u'video',u'aps',u'stripbooks',u'music',u'apparel',u'electronics']]
include = [ ObuyUrlSummary(name=name) for name in [u'video',u'aps']]
firstPage = AmazonAllSortParser(content, rootUrlSum, include = None,exclude=exclude)
for sort_1 in firstPage.parserSubUrlSums():
#for index, urlsum in enumerate(sort_3.parentPath):
#print '\t' * index, str(urlsum.getUrlSumAbstract())
print sort_1.url , sort_1.catagoryLevel
def testSort1Page():
fileName = os.path.join(testFilePath, 'toys_games.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
sort_1_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=nb_sb_noss?__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&url=search-alias%3Dtoys-and-games&field-keywords=&x=16&y=14',
parentPath=[('test')], catagoryLevel=1)
sort2Page = AmazonSortListParser(content, sort_1_urlsum)
for sort_2 in sort2Page.parserSubUrlSums():
print sort_2.url
def testSort2Page():
fileName = os.path.join(testFilePath, 'amazon_2011-10-09_20-00-22.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
rootObuyUrlSummary = ObuyUrlSummary(url=r'http://www.amazon.cn/',parentPath=[], catagoryLevel=0)
sort_1_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=nb_sb_noss?__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&url=search-alias%3Dtoys-and-games&field-keywords=&x=16&y=14',
parentPath=[rootObuyUrlSummary], catagoryLevel=1)
sort_1_urlsum.name = 'toys-and-games'
sort_2_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/gp/search/ref=sr_nr_n_0?rh=n%3A647070051%2Cn%3A!647071051%2Cn%3A1982054051&bbn=647071051&ie=UTF8&qid=1313051441&rnid=647071051',
parentPath=[rootObuyUrlSummary,sort_1_urlsum], catagoryLevel=2)
sort2Page = AmazonListFirstPageParser(content, sort_2_urlsum)
for sort_2 in sort2Page.parserSubUrlSums():
print sort_2.url
for product in sort2Page.parserPageInfos():
print product.logstr()
def deepSort3Page():
from crawlerhttp import getContentFromUrlSum
rootObuyUrlSummary = ObuyUrlSummary(url=r'http://www.amazon.cn/',parentPath=[], catagoryLevel=0)
sort_2_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/gp/search/ref=sr_ex_n_1?rh=n%3A814224051%2Cn%3A814227051%2Cn%3A98519071&bbn=98519071&ie=UTF8&qid=1322031024',
parentPath=[rootObuyUrlSummary], catagoryLevel=2)
content = getContentFromUrlSum(sort_2_urlsum)
parser = AmazonSortListParser(content, sort_2_urlsum)
for urlsum in parser.parserSubUrlSums():
print urlsum.name,urlsum.url
def testSort3Details():
fileName = os.path.join(testFilePath, 'amazon_2011-10-09_20-08-17.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
rootObuyUrlSummary = ObuyUrlSummary(url=r'http://www.amazon.cn/',parentPath=[], catagoryLevel=0)
sort_1_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=nb_sb_noss?__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&url=search-alias%3Dtoys-and-games&field-keywords=&x=16&y=14',
parentPath=[rootObuyUrlSummary], catagoryLevel=1)
sort_1_urlsum.name = 'toys-and-games'
sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/gp/search/ref=sr_nr_n_0?rh=n%3A647070051%2Cn%3A!647071051%2Cn%3A1982054051&bbn=647071051&ie=UTF8&qid=1313051441&rnid=647071051',
parentPath=[rootObuyUrlSummary,sort_1_urlsum], catagoryLevel=2)
sort3Page = AmazonNextPageJsonParser(content, sort_3_urlsum)
for product in sort3Page.parserPageInfos():
print product.logstr()
for sort_3 in sort3Page.parserSubUrlSums():
print sort_3.url
def testComment():
fileName = os.path.join(testFilePath, 'computer.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
soup = BeautifulSoup(content)
comments = soup.findAll(text=lambda text:isinstance(text, Comment))
for comment in comments:
print comment.extract()
def testJson():
import json
fileName = os.path.join(testFilePath, 'amazon_2011-10-09_20-06-28.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
segList = content.split('&&&')
segList = [' '.join(seg.split()).strip() for seg in segList]
segList = filter(lambda seg:seg != '',segList)
jSonObjs = [json.loads(seg) for seg in segList ]
for jsonObj in jSonObjs:
if jsonObj.has_key('pagination'):
print jsonObj['pagination']['data']['value']
if jsonObj.has_key('results-btf'):
print '+++++++++++++++++'
jsonRet = jsonObj['results-btf']['data']['value']
sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
parentPath=[('test')], catagoryLevel=3)
sort3Page = AmazonListFirstPageParser(jsonRet, sort_3_urlsum)
for product in sort3Page.parserPageInfos():
print product.logstr()
elif jsonObj.has_key('results-atf-next'):
print '--------------'
jsonRet = jsonObj['results-atf-next']['data']['value']
from BeautifulSoup import BeautifulSoup, Comment
soup = BeautifulSoup(jsonRet)
comment = soup.find(attrs={'id':'results-atf-next'},text=lambda text:isinstance(text, Comment))
sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
parentPath=[('test')], catagoryLevel=3)
sort3Page = AmazonListFirstPageParser(comment.extract(), sort_3_urlsum)
for product in sort3Page.parserPageInfos():
print product.logstr()
if __name__ == '__main__':
testSort3Details()
| [
[
8,
0,
0.0195,
0.018,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0329,
0.003,
0,
0.66,
0.0333,
878,
0,
2,
0,
0,
878,
0,
0
],
[
1,
0,
0.0359,
0.003,
0,
0.66,
... | [
"'''\nCreated on 2011-7-27\n主要用于从网站上爬取信息后,抽取页面信息;\n\n@author: zhongfeng\n'''",
"from BeautifulSoup import BeautifulSoup, Comment",
"from pageparser import *",
"import itertools",
"import json",
"import os",
"import re",
"import urllib",
"import urlparse",
"import string",
"from enum import Enum"... |
#/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-10-10
@author: zhongfeng
'''
## {{{ http://code.activestate.com/recipes/84317/ (r2)
from threading import Condition,Thread
import copy
class Future(object):
def __init__(self,func,*param):
# Constructor
self.__done=0
self.__result=None
self.__status='working'
self.__C=Condition() # Notify on this Condition when result is ready
# Run the actual function in a separate thread
self.__T=Thread(target=self.wrapper,args=(func,param))
self.__T.setName("FutureThread")
self.__T.start()
def __repr__(self):
return '<Future at '+hex(id(self))+':'+self.__status+'>'
def __call__(self):
self.__C.acquire()
while self.__done == 0:
self.__C.wait()
self.__C.release()
# We deepcopy __result to prevent accidental tampering with it.
a=copy.deepcopy(self.__result)
return a
def wrapper(self, func, param):
# Run the actual function, and let us housekeep around it
self.__C.acquire()
try:
self.__result=func(*param)
except:
self.__result="Exception raised within Future"
self.__done = 1
self.__status = self.__result
self.__C.notify()
self.__C.release()
## end of http://code.activestate.com/recipes/84317/ }}}
class Singleton(object):
''' python 风格的单例模式 '''
def __new__(cls,*args,**kargs):
if '_inst' not in vars(cls):
cls._inst = super(Singleton,cls).__new__(cls, *args,**kargs)
return cls._inst
from time import time
def profile(func):
def log(*args,**kargs):
start = time()
ret = func(*args,**kargs)
end = time()
expire = end - start
print func.__name__,expire
return ret
return log
def profile1(sec):
def around(func):
def log(*args,**kargs):
start = time()
ret = func(*args,**kargs)
end = time()
expire = end - start
print func.__name__,expire
return ret
return log
def after(func):
def log(*args,**kargs):
start = time()
print func.__name__,start
ret = func(*args,**kargs)
return ret
return log
return {'around':around,'after':after}[sec]
from copy import deepcopy
def keeper(func):
defArgs = func.__defaults__
def wrap(*args,**kargs):
funcDef = deepcopy(defArgs)
func.__defaults__ = funcDef
return func(*args,**kargs)
return wrap
import md5
def hash(key):
m = md5.new()
keyStr = str(key)
m.update(keyStr)
return long(m.hexdigest(), 16)
import bisect
class ConsistentHash(object):
def __init__(self,nodes,numOfReplicas = 4,hashfunc = hash):
self.hashfunc = hashfunc
self.numOfReplicas = numOfReplicas
self.ring = {}
self.__sorted_key_list = []
if nodes:
for node in nodes:
self.addNode(node)
def addNode(self,node):
for num in range(self.numOfReplicas):
genKey = self.hashfunc('%s:%s' % (node,num))
self.ring[genKey] = node
bisect.insort(self.__sorted_key_list,genKey)
def removeNode(self,node):
for num in range(self.numOfReplicas):
key = self.hashfunc('%s:%s' % (node,num))
del self.ring[key]
self.__sorted_key_list.remove(key)
def getNode(self,obj):
genKey = self.hashfunc(obj)
nodeKey = self.__getNodeKey(genKey)
if nodeKey != None:
return self.ring[nodeKey]
def __getNodeKey(self,key):
if not self.ring:
return None
for nodeKey in self.__sorted_key_list:
if key <= nodeKey:
return nodeKey
return self.__sorted_key_list[0]
@keeper
def test(l = [],t = 2):
l.append('1')
print l
return t
memcache_servers = ['192.168.0.246:11212',
'192.168.0.247:11212',
'192.168.0.249:11212',
'192.168.0.250:11212']
if __name__ == '__main__':
f = Future(test, *([1,2,3],3))
print f() | [
[
8,
0,
0.0424,
0.0303,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0727,
0.0061,
0,
0.66,
0.0625,
83,
0,
2,
0,
0,
83,
0,
0
],
[
1,
0,
0.0788,
0.0061,
0,
0.66,
... | [
"'''\nCreated on 2011-10-10\n\n@author: zhongfeng\n'''",
"from threading import Condition,Thread",
"import copy",
"class Future(object):\n\n def __init__(self,func,*param):\n # Constructor\n self.__done=0\n self.__result=None\n self.__status='working'\n self.__C=Condition... |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-8-02
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from pageparser import *
from spiderconfigparser import SpiderConfig
dangdangRoot = ObuyUrlSummary(url=r'http://category.dangdang.com/', name='dangdang')
class DangDangAllSortParser(RootCatagoryPageParser):
'''
从http://category.dangdang.com/?ref=www-0-C 获取所有的分类信息,
组合成ObuyUrlSummary,不包含图书
'''
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(DangDangAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def getBaseSort3UrlSums(self):
finalUrlList = []
allSort = self.soup.find(attrs={'class':'categories_mainBody'})
for t in allSort.findAll(name='div',attrs={'id':re.compile(r'[a-z]*')}):#一级分类
name = t['id']
if name == 'book': #不解析图书
continue
url = ''.join((r'http://category.dangdang.com/',name))
sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=False)
sort_2 = t.find(attrs={'class':''.join([name,'_details'])})
for tt in sort_2(name='li'):#二级分类
name, url = ParserUtils.parserTag_A(tt.a)
sort_2_urlsum = self.buildSort_N(url, name, sort_1_urlsum, isCrawle=False)
for ttt in tt.a.findNextSiblings(name='a'):#三级分类
name, url = ParserUtils.parserTag_A(ttt)
url = '&'.join((url,'store=eq0'))
sort_3_urlsum = self.buildSort_N(url, name, sort_2_urlsum,firstFinalPage=True)
finalUrlList.append(sort_3_urlsum)
return finalUrlList
class DangDangSort3PageParser(Sort3PageParser):
'''
三级页面解析类
'''
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(DangDangSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def nextPageUrlPattern(self):
pageSeg = 'p={}'
return '%s&%s' % (self.rootUrlSummary.url,pageSeg)
def getTotal(self):
regx = u'共([0-9]*)页'
p = re.compile(regx)
s = self.soup.find(name='span',attrs = {'id':'all_num'})
if s is None: #dangdang_2011-08-04_10-00-04.html页面格式解析
st = self.soup.find(name='input',attrs = {'id':'jumpto'})
if st != None:
s = st.findNextSibling(name='span')
if s is None:
return 1
pageNum = s.getText()
totalNum = int(p.search(pageNum).group(1))
if totalNum > SpiderConfig.getMaxPage():
totalNum = SpiderConfig.getMaxPage()
return totalNum
def parserPageInfos(self):
plist = self.soup.find(name='ul',attrs={'class':'mode_goods clearfix'})
resultList = []
if plist is None:
prodSeg = self.soup.findAll(attrs = {'class':'listitem '})
else:
prodSeg = plist.findAll(name='li')
for prod in prodSeg:
pNameSeg = prod.find(attrs={'class':'name'})
if pNameSeg is None:
pNameSeg = prod.find(attrs={'class':'title'})
pName,url = ParserUtils.parserTag_A(pNameSeg.a)
pid = url.rsplit('=',1)[-1]
t = prod.find(attrs={'class':'price_d'})
if t != None :
currentPrice = ParserUtils.getPrice(t.getText())
else:
currentPrice = 0.00
t = prod.find(attrs={'class':'price_m'})
if t != None:
pastPrice = ParserUtils.getPrice(t.getText())
else:
pastPrice = 0.00
starLevelSeg = prod.find(name = 'p',attrs={'class':'starlevel'})
repu = 0.0
evalNum = 0
if starLevelSeg:
for starImg in starLevelSeg.findAll(name='img'):
if starImg['src'] == 'images/star_all.png':
repu += 1.0
elif starImg['src'] == 'images/star_half.png':
repu += 0.5
evalNum = starLevelSeg.find(name='span').a.getText()
imgUrlSeg = prod.find(attrs={'class':re.compile('.*pic')})
imgUrl = ParserUtils.getImgUrl(imgUrlSeg)
prodDetail = ProductDetails(productId=pid, fullUrl= url,imageUrl = imgUrl, privPrice = currentPrice,pubPrice=pastPrice,
name=pName, adWords='',reputation=repu,evaluateNum=evalNum)
prodDetail.catagory = self.rootUrlSummary
resultList.append(prodDetail)
return resultList
class DangDangSort4PageParser(DangDangSort3PageParser):
'''
分类四级页面为列表页面,只抽取Product信息
'''
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(DangDangSort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def parserSubUrlSums(self):
pass
parserDict = {0:DangDangAllSortParser, 3:DangDangSort3PageParser, 4:DangDangSort4PageParser}
''' test '''
import os
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')
def testDangDangAllSortPage():
fileName = os.path.join(testFilePath,'dangcat.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
rootUrlSum = ObuyUrlSummary(url=r'http://category.dangdang.com/', name='dangdang')
pserver = ObuyUrlSummary(url = r'http://category.dangdang.com/list?cat=4001976',
name='奶粉',catagoryLevel = 2)
firstPage = DangDangAllSortParser(content, rootUrlSum,include = [pserver])
for sort_3 in firstPage.parserSubUrlSums():
for index, urlsum in enumerate(sort_3.parentPath):
print urlsum.name
print sort_3.name,sort_3.url ,sort_3.catagoryLevel
def testSort3Page():
fileName = os.path.join(testFilePath,'dangdang_2011-08-19_13-03-03.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
sort_3_urlsum = ObuyUrlSummary(url=r'http://category.dangdang.com/list?cat=4001011&store=eq0',
parentPath=[('test')], catagoryLevel=3)
sort3Page = DangDangSort3PageParser(content, sort_3_urlsum)
for sort_4 in sort3Page.getSort4PageUrlSums():
print sort_4.url
def testSort3Details():
fileName = os.path.join(testFilePath,'dangdang_2011-08-04_10-31-18.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
sort_3_urlsum = ObuyUrlSummary(url=r'http://category.dangdang.com/list?cat=4001011&store=eq0',
parentPath=[], catagoryLevel=3)
sort3Page = DangDangSort3PageParser(content, sort_3_urlsum)
for product in sort3Page.parserPageInfos():
print type(product.logstr())
print product.logstr()
def testRegx():
regx = u'共([0-9]*)页'
p = re.compile(regx)
fileName = os.path.join(testFilePath,'4001011.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
soup = BeautifulSoup(content)
s = soup.find(name='span',attrs = {'id':'all_num'}).getText()
content = content.decode('gb18030','ignore')
print p.search(s).group(1)
if __name__ == '__main__':
#testRegx()
#testDangDangAllSortPage()
#testSort3Page()
testSort3Details()
| [
[
8,
0,
0.0383,
0.0383,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.071,
0.0055,
0,
0.66,
0.0625,
488,
0,
1,
0,
0,
488,
0,
0
],
[
1,
0,
0.0765,
0.0055,
0,
0.66,... | [
"'''\nCreated on 2011-8-02\n\n主要用于从网站上爬取信息后,抽取页面信息;\n\n@author: zhongfeng\n'''",
"from pageparser import *",
"from spiderconfigparser import SpiderConfig",
"dangdangRoot = ObuyUrlSummary(url=r'http://category.dangdang.com/', name='dangdang')",
"class DangDangAllSortParser(RootCatagoryPageParser):\n '''\n... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from dangdang.dangpageparser import parserDict,dangdangRoot
from spider import main
if __name__ == '__main__':
main(dangdangRoot,parserDict) | [
[
8,
0,
0.4062,
0.375,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.6875,
0.0625,
0,
0.66,
0.3333,
945,
0,
2,
0,
0,
945,
0,
0
],
[
1,
0,
0.8125,
0.0625,
0,
0.66,... | [
"'''\nCreated on 2011-8-2\n\n@author: zhongfeng\n\n'''",
"from dangdang.dangpageparser import parserDict,dangdangRoot",
"from spider import main",
"if __name__ == '__main__':\n main(dangdangRoot,parserDict)",
" main(dangdangRoot,parserDict)"
] |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from dangdang.dangpageparser import parserDict,dangdangRoot
from spider import main
if __name__ == '__main__':
main(dangdangRoot,parserDict) | [
[
8,
0,
0.4062,
0.375,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.6875,
0.0625,
0,
0.66,
0.3333,
945,
0,
2,
0,
0,
945,
0,
0
],
[
1,
0,
0.8125,
0.0625,
0,
0.66,... | [
"'''\nCreated on 2011-8-2\n\n@author: zhongfeng\n\n'''",
"from dangdang.dangpageparser import parserDict,dangdangRoot",
"from spider import main",
"if __name__ == '__main__':\n main(dangdangRoot,parserDict)",
" main(dangdangRoot,parserDict)"
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-8-02
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from pageparser import *
from spiderconfigparser import SpiderConfig
dangdangRoot = ObuyUrlSummary(url=r'http://category.dangdang.com/', name='dangdang')
class DangDangAllSortParser(RootCatagoryPageParser):
'''
从http://category.dangdang.com/?ref=www-0-C 获取所有的分类信息,
组合成ObuyUrlSummary,不包含图书
'''
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(DangDangAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def getBaseSort3UrlSums(self):
finalUrlList = []
allSort = self.soup.find(attrs={'class':'categories_mainBody'})
for t in allSort.findAll(name='div',attrs={'id':re.compile(r'[a-z]*')}):#一级分类
name = t['id']
if name == 'book': #不解析图书
continue
url = ''.join((r'http://category.dangdang.com/',name))
sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=False)
sort_2 = t.find(attrs={'class':''.join([name,'_details'])})
for tt in sort_2(name='li'):#二级分类
name, url = ParserUtils.parserTag_A(tt.a)
sort_2_urlsum = self.buildSort_N(url, name, sort_1_urlsum, isCrawle=False)
for ttt in tt.a.findNextSiblings(name='a'):#三级分类
name, url = ParserUtils.parserTag_A(ttt)
url = '&'.join((url,'store=eq0'))
sort_3_urlsum = self.buildSort_N(url, name, sort_2_urlsum,firstFinalPage=True)
finalUrlList.append(sort_3_urlsum)
return finalUrlList
class DangDangSort3PageParser(Sort3PageParser):
'''
三级页面解析类
'''
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(DangDangSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def nextPageUrlPattern(self):
pageSeg = 'p={}'
return '%s&%s' % (self.rootUrlSummary.url,pageSeg)
def getTotal(self):
regx = u'共([0-9]*)页'
p = re.compile(regx)
s = self.soup.find(name='span',attrs = {'id':'all_num'})
if s is None: #dangdang_2011-08-04_10-00-04.html页面格式解析
st = self.soup.find(name='input',attrs = {'id':'jumpto'})
if st != None:
s = st.findNextSibling(name='span')
if s is None:
return 1
pageNum = s.getText()
totalNum = int(p.search(pageNum).group(1))
if totalNum > SpiderConfig.getMaxPage():
totalNum = SpiderConfig.getMaxPage()
return totalNum
def parserPageInfos(self):
plist = self.soup.find(name='ul',attrs={'class':'mode_goods clearfix'})
resultList = []
if plist is None:
prodSeg = self.soup.findAll(attrs = {'class':'listitem '})
else:
prodSeg = plist.findAll(name='li')
for prod in prodSeg:
pNameSeg = prod.find(attrs={'class':'name'})
if pNameSeg is None:
pNameSeg = prod.find(attrs={'class':'title'})
pName,url = ParserUtils.parserTag_A(pNameSeg.a)
pid = url.rsplit('=',1)[-1]
t = prod.find(attrs={'class':'price_d'})
if t != None :
currentPrice = ParserUtils.getPrice(t.getText())
else:
currentPrice = 0.00
t = prod.find(attrs={'class':'price_m'})
if t != None:
pastPrice = ParserUtils.getPrice(t.getText())
else:
pastPrice = 0.00
starLevelSeg = prod.find(name = 'p',attrs={'class':'starlevel'})
repu = 0.0
evalNum = 0
if starLevelSeg:
for starImg in starLevelSeg.findAll(name='img'):
if starImg['src'] == 'images/star_all.png':
repu += 1.0
elif starImg['src'] == 'images/star_half.png':
repu += 0.5
evalNum = starLevelSeg.find(name='span').a.getText()
imgUrlSeg = prod.find(attrs={'class':re.compile('.*pic')})
imgUrl = ParserUtils.getImgUrl(imgUrlSeg)
prodDetail = ProductDetails(productId=pid, fullUrl= url,imageUrl = imgUrl, privPrice = currentPrice,pubPrice=pastPrice,
name=pName, adWords='',reputation=repu,evaluateNum=evalNum)
prodDetail.catagory = self.rootUrlSummary
resultList.append(prodDetail)
return resultList
class DangDangSort4PageParser(DangDangSort3PageParser):
    '''
    Level-4 category pages are plain listing pages: only product details are
    extracted, so sub-URL discovery is disabled (parserSubUrlSums is a no-op).
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(DangDangSort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def parserSubUrlSums(self):
        # Intentionally empty: no recursion below level 4.
        pass
# Dispatch table: category level -> parser class used by the spider driver.
parserDict = {0:DangDangAllSortParser, 3:DangDangSort3PageParser, 4:DangDangSort4PageParser}
''' test '''
# Ad-hoc manual tests (Python 2) driven from saved HTML fixtures in
# test_resources/; each prints results for visual inspection.
import os
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')
def testDangDangAllSortPage():
    # Parse the saved all-categories page, filtered to one included category.
    fileName = os.path.join(testFilePath,'dangcat.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    rootUrlSum = ObuyUrlSummary(url=r'http://category.dangdang.com/', name='dangdang')
    pserver = ObuyUrlSummary(url = r'http://category.dangdang.com/list?cat=4001976',
                         name='奶粉',catagoryLevel = 2)
    firstPage = DangDangAllSortParser(content, rootUrlSum,include = [pserver])
    for sort_3 in firstPage.parserSubUrlSums():
        for index, urlsum in enumerate(sort_3.parentPath):
            print urlsum.name
        print sort_3.name,sort_3.url ,sort_3.catagoryLevel
def testSort3Page():
    # Parse a saved level-3 listing page and print its follow-up page URLs.
    fileName = os.path.join(testFilePath,'dangdang_2011-08-19_13-03-03.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://category.dangdang.com/list?cat=4001011&store=eq0',
                               parentPath=[('test')], catagoryLevel=3)
    sort3Page = DangDangSort3PageParser(content, sort_3_urlsum)
    for sort_4 in sort3Page.getSort4PageUrlSums():
        print sort_4.url
def testSort3Details():
    # Parse a saved level-3 listing page and print each product's log line.
    fileName = os.path.join(testFilePath,'dangdang_2011-08-04_10-31-18.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://category.dangdang.com/list?cat=4001011&store=eq0',
                               parentPath=[], catagoryLevel=3)
    sort3Page = DangDangSort3PageParser(content, sort_3_urlsum)
    for product in sort3Page.parserPageInfos():
        print type(product.logstr())
        print product.logstr()
def testRegx():
    # Sanity-check the "total pages" regex against a saved page.
    regx = u'共([0-9]*)页'
    p = re.compile(regx)
    fileName = os.path.join(testFilePath,'4001011.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    soup = BeautifulSoup(content)
    s = soup.find(name='span',attrs = {'id':'all_num'}).getText()
    content = content.decode('gb18030','ignore')
    print p.search(s).group(1)
if __name__ == '__main__':
    #testRegx()
    #testDangDangAllSortPage()
    #testSort3Page()
    testSort3Details()
| [
[
8,
0,
0.0383,
0.0383,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.071,
0.0055,
0,
0.66,
0.0625,
488,
0,
1,
0,
0,
488,
0,
0
],
[
1,
0,
0.0765,
0.0055,
0,
0.66,... | [
"'''\nCreated on 2011-8-02\n\n主要用于从网站上爬取信息后,抽取页面信息;\n\n@author: zhongfeng\n'''",
"from pageparser import *",
"from spiderconfigparser import SpiderConfig",
"dangdangRoot = ObuyUrlSummary(url=r'http://category.dangdang.com/', name='dangdang')",
"class DangDangAllSortParser(RootCatagoryPageParser):\n '''\n... |
#/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-07-11
日志工厂类
@author: zhongfeng
'''
import logging.config
import os,sys
class LoggerFactory(object):
    """Singleton wrapper around the stdlib ``logging`` package.

    On first use it loads ``logging.conf`` from the directory of the running
    script (``sys.argv[0]``); when the file is absent, logging's default
    configuration is kept unchanged.
    """
    # Lazily-created singleton; its only job is to trigger the one-time
    # configuration load in __new__.
    _loggerFac = None
    def __init__(self):
        pass
    def __new__(cls):
        '''
        Singleton: the logging configuration is applied exactly once,
        on the first instantiation.
        '''
        if '_inst' not in vars(cls):
            curPath = os.path.abspath(os.path.dirname(sys.argv[0]))
            logCfg = os.path.join(curPath, 'logging.conf')
            # Apply the file configuration only when it actually exists
            # (the original code had an empty "if not exists: pass" branch).
            if os.path.exists(logCfg):
                logging.config.fileConfig(logCfg)
            cls._inst = super(LoggerFactory, cls).__new__(cls)
        return cls._inst
    @classmethod
    def getLogger(cls, logName='root'):
        """Return the logger named *logName*.

        A class may be passed instead of a string, in which case the class
        name is used as the logger name.
        """
        if cls._loggerFac is None:  # idiom fix: identity test, not == None
            cls._loggerFac = LoggerFactory()
        if isinstance(logName, type):  # a class was passed -- use its name
            logName = logName.__name__
        return logging.getLogger(logName)
    def __del__(self):
        logging.shutdown()
    @classmethod
    def shutdown(cls):
        """Flush and close all logging handlers."""
        logging.shutdown()
def testMutiThread():
    # Manual smoke test: hammer the logger from a 3-worker thread pool.
    # NOTE(review): relies on `ctime` being imported by the __main__ block
    # below -- calling this function on import would raise NameError.
    from threadpool import ThreadPool,WorkRequest
    def printlog(msg):
        logger = LoggerFactory.getLogger()
        logger.info('-'.join([msg,ctime()]))
    # Generator of ~14k product URLs used purely as log payloads.
    urls = (r'http://www.360buy.com/product/{}.html'.format(str(proid)) for proid in xrange(1,14000))
    #print urls
    #requests = makeRequests(printlog,urls)
    print "Creating thread pool with 3 worker threads."
    main = ThreadPool(3)
    # Side-effect-only list comprehension: enqueue one work request per URL.
    [main.putRequest(WorkRequest(printlog,[url])) for url in urls ]
    main.wait()
if __name__ == '__main__':
    from time import ctime
    # Quick manual check that the named logger is configured and writes.
    for t in range(10):
        logger = LoggerFactory.getLogger('360buy')
        logger.info(' %d this is a test %s' % ( t, ctime() ))
| [
[
8,
0,
0.08,
0.0933,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1333,
0.0133,
0,
0.66,
0.2,
232,
0,
1,
0,
0,
232,
0,
0
],
[
1,
0,
0.1467,
0.0133,
0,
0.66,
... | [
"'''\nCreated on 2011-07-11\n\n日志工厂类\n\n@author: zhongfeng\n'''",
"import logging.config",
"import os,sys",
"class LoggerFactory(object):\n _loggerFac = None\n def __init__(self):\n pass\n def __new__(cls):\n '''\n 单态实现,初始化一次 \n '''",
" _loggerFac = None",... |
import os, marshal, thread
# Filename used for index files, must not contain numbers
# Filename of the on-disk head/tail index. The numbered segment files are
# named by integers, so this name must never be a bare number.
INDEX_FILENAME = 'index'

class Empty(Exception):
    """Raised by get() when the queue holds no items."""
    pass

class PersistentQueue:
    """Disk-backed FIFO queue.

    Items are kept in two in-memory caches (one at each end of the queue);
    full caches are flushed to numbered segment files inside the queue
    directory, and a small ``index`` file records the head/tail segment
    numbers so the queue survives process restarts.
    """
    def __init__(self, name, cache_size=512, marshal=marshal):
        """
        Create a persistent FIFO queue backed by directory *name*.

        *cache_size* bounds the number of items cached in memory at each
        end of the queue. By default the marshal module is used to
        (de)serialize items, but any module/instance with compatible
        ``load``/``dump`` may be passed (e.g. pickle).
        """
        assert cache_size > 0, 'Cache size must be larger than 0'
        self.name = name
        self.cache_size = cache_size
        self.marshal = marshal
        self.index_file = os.path.join(name, INDEX_FILENAME)
        self.temp_file = os.path.join(name, 'tempfile')
        self.mutex = thread.allocate_lock()
        self._init_index()

    def _init_index(self):
        """Load (or create) the index and both end caches from disk."""
        if not os.path.exists(self.name):
            os.mkdir(self.name)
        if os.path.exists(self.index_file):
            # BUG FIX: removed a stray debug print of the index path that
            # had been left in here.
            with open(self.index_file) as index_file:
                self.head, self.tail = map(int, index_file.read().split(' '))
        else:
            self.head, self.tail = 0, 1
        def _load_cache(cache, num):
            # Load one end cache (attribute named by *cache*) from segment
            # file *num*; a fresh/empty file yields an empty cache.
            name = os.path.join(self.name, str(num))
            mode = 'rb+' if os.path.exists(name) else 'wb+'
            cachefile = open(name, mode)
            try:
                setattr(self, cache, self.marshal.load(cachefile))
            except EOFError:
                setattr(self, cache, [])
            cachefile.close()
        _load_cache('put_cache', self.tail)
        _load_cache('get_cache', self.head)
        assert self.head < self.tail, 'Head not less than tail'

    def _sync_index(self):
        """Atomically rewrite the index file via a temp-file rename."""
        assert self.head < self.tail, 'Head not less than tail'
        index_file = open(self.temp_file, 'w')
        index_file.write('%d %d' % (self.head, self.tail))
        index_file.close()
        if os.path.exists(self.index_file):
            os.remove(self.index_file)
        os.rename(self.temp_file, self.index_file)

    def _split(self):
        """Flush the tail (put) cache to a new segment file."""
        put_file = os.path.join(self.name, str(self.tail))
        temp_file = open(self.temp_file, 'wb')
        self.marshal.dump(self.put_cache, temp_file)
        temp_file.close()
        if os.path.exists(put_file):
            os.remove(put_file)
        os.rename(self.temp_file, put_file)
        self.tail += 1
        if len(self.put_cache) <= self.cache_size:
            self.put_cache = []
        else:
            # NOTE(review): this branch is unreachable in normal use (put()
            # splits at exactly cache_size items) and looks like it should
            # keep the overflow, i.e. [self.cache_size:] -- preserved as-is.
            self.put_cache = self.put_cache[:self.cache_size]
        self._sync_index()

    def _join(self):
        """Advance the head: load the next segment into the get cache."""
        current = self.head + 1
        if current == self.tail:
            # No full segment left on disk; drain the in-memory put cache.
            self.get_cache = self.put_cache
            self.put_cache = []
        else:
            get_file = open(os.path.join(self.name, str(current)), 'rb')
            self.get_cache = self.marshal.load(get_file)
            get_file.close()
        try:
            os.remove(os.path.join(self.name, str(self.head)))
        except OSError:
            # The consumed segment file may already be gone; ignore.
            pass
        self.head = current
        if self.head == self.tail:
            # Keep the head < tail invariant when the queue is fully drained.
            self.head = self.tail - 1
        self._sync_index()

    def _sync(self):
        """Write the index and both end caches to disk (atomic renames)."""
        self._sync_index()
        get_file = os.path.join(self.name, str(self.head))
        temp_file = open(self.temp_file, 'wb')
        self.marshal.dump(self.get_cache, temp_file)
        temp_file.close()
        if os.path.exists(get_file):
            os.remove(get_file)
        os.rename(self.temp_file, get_file)
        put_file = os.path.join(self.name, str(self.tail))
        temp_file = open(self.temp_file, 'wb')
        self.marshal.dump(self.put_cache, temp_file)
        temp_file.close()
        if os.path.exists(put_file):
            os.remove(put_file)
        os.rename(self.temp_file, put_file)

    def __len__(self):
        """
        Return the number of items in the queue.
        """
        self.mutex.acquire()
        try:
            # Full middle segments hold cache_size items each; add the two
            # partially-filled end caches.
            return (((self.tail-self.head)-1)*self.cache_size) + \
                    len(self.put_cache) + len(self.get_cache)
        finally:
            self.mutex.release()

    def sync(self):
        """
        Synchronize memory caches to disk.
        """
        self.mutex.acquire()
        try:
            self._sync()
        finally:
            self.mutex.release()

    def put(self, obj):
        """
        Put the item 'obj' on the queue.
        """
        self.mutex.acquire()
        try:
            self.put_cache.append(obj)
            if len(self.put_cache) >= self.cache_size:
                self._split()
        finally:
            self.mutex.release()

    def get(self):
        """
        Get an item from the queue.
        Throws Empty exception if the queue is empty.
        """
        self.mutex.acquire()
        try:
            if len(self.get_cache) > 0:
                return self.get_cache.pop(0)
            else:
                self._join()
                if len(self.get_cache) > 0:
                    return self.get_cache.pop(0)
                else:
                    raise Empty
        finally:
            self.mutex.release()

    def close(self):
        """
        Close the queue. Implicitly synchronizes memory caches to disk.
        No further accesses should be made through this queue instance.
        """
        self.mutex.acquire()
        try:
            self._sync()
            if os.path.exists(self.temp_file):
                try:
                    os.remove(self.temp_file)
                except OSError:
                    pass
        finally:
            self.mutex.release()
## Tests
# NOTE(review): this manual demo is broken as written -- it get()s from a
# fresh queue before anything was put (raising Empty), sleeps one second per
# item (~17 minutes for 1000 items), and under Python 3 ELEMENTS/2 would be
# a float. Left as-is; it only runs when the module is executed directly.
if __name__ == "__main__":
    ELEMENTS = 1000
    p = PersistentQueue('test', 1)
    print 'Enqueueing %d items, cache size = %d' % (ELEMENTS,
                                                    p.cache_size)
    for a in range(ELEMENTS/2):
        print p.get()
    from time import sleep
    for a in range(ELEMENTS):
        sleep(1)
        print a
        p.put(str(a))
    p.sync()
    print 'Queue length (using __len__):', len(p)
    print 'Dequeueing %d items' % (ELEMENTS/2)
    for a in range(ELEMENTS/2):
        p.get()
    print 'Queue length (using __len__):', len(p)
    print 'Dequeueing %d items' % (ELEMENTS/2)
    for a in range(ELEMENTS/2):
        p.get()
    print 'Queue length (using __len__):', len(p)
    p.sync()
    p.close()
| [
[
1,
0,
0.005,
0.005,
0,
0.66,
0,
688,
0,
3,
0,
0,
688,
0,
0
],
[
14,
0,
0.0198,
0.005,
0,
0.66,
0.25,
961,
1,
0,
0,
0,
0,
3,
0
],
[
3,
0,
0.0347,
0.005,
0,
0.66,
... | [
"import os, marshal, thread",
"INDEX_FILENAME = 'index'",
"class Empty(Exception): pass",
"class PersistentQueue:\n\n def __init__(self, name, cache_size=512, marshal=marshal):\n \"\"\"\n Create a persistent FIFO queue named by the 'name' argument.\n\n The number of cached queue items... |
#/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-11-9
@author: zhongfeng
'''
from egou.egoupageparser import *
from crawlerhttp import crawle
from pageparser import *
from dbproc.catagoryproc import *
def getContentFromUrlSum(urlsum):
    """Fetch *urlsum* repeatedly until an HTTP 200 response arrives and
    return the response body.

    NOTE(review): retries forever on non-200 responses -- no backoff or cap.
    """
    while True:
        response = crawle(urlsum)
        if response.code == 200:
            return response.content
def getAllSort1(content=None):
    """Return the level-1 category UrlSummary list from the egou front page.

    When *content* is not supplied, the page is fetched live from egouRoot.
    """
    page = content if content is not None else getContentFromUrlSum(egouRoot)
    return EGouSortParser(page, egouRoot).parserSubUrlSums()
def getAllSort3():
    """Walk level-1 then level-2 egou category pages and collect every
    level-3 category UrlSummary into a single flat list."""
    collected = []
    for lvl1 in getAllSort1():
        lvl1_parser = EGouSort1PageParser(getContentFromUrlSum(lvl1), lvl1)
        for lvl2 in lvl1_parser.parserSubUrlSums():
            lvl2_parser = EGouSort2PageParser(getContentFromUrlSum(lvl2), lvl2)
            collected.extend(lvl2_parser.parserSubUrlSums())
    return collected
if __name__ == '__main__':
    # Manual driver: crawl all level-3 egou categories and persist them.
    # NOTE(review): 'c:t.log' looks like a typo for a Windows path such as
    # 'c:\\t.log' -- confirm before relying on the output location.
    from itertools import chain
    import os
    with open('c:t.log','w') as output:
        result = getAllSort3()
        calEveryLevelCatNum(result)
        createSiteCat(result,u'55bigo')
        #for sort3 in getAllSort3():
            #output.write( '|'.join([cat.name.encode('gb18030') for cat in chain(sort3.parentPath,(sort3.parent,))]))
            #output.write(os.linesep)
| [
[
8,
0,
0.1053,
0.0877,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1754,
0.0175,
0,
0.66,
0.125,
157,
0,
1,
0,
0,
157,
0,
0
],
[
1,
0,
0.193,
0.0175,
0,
0.66,
... | [
"'''\nCreated on 2011-11-9\n\n@author: zhongfeng\n'''",
"from egou.egoupageparser import *",
"from crawlerhttp import crawle",
"from pageparser import *",
"from dbproc.catagoryproc import *",
"def getContentFromUrlSum(urlsum):\n while True:\n result = crawle(urlsum)\n if result.code == 20... |
#/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-11-09
@author: zhongfeng
'''
import re
from pageparser import *
egouRoot = ObuyUrlSummary(url=r'http://www.egou.com/', name='egou')
mainHost = 'http://www.egou.com'
def filterCatName(name):
    """Strip item-count decorations such as '(123)' (or empty '()') from a
    category name and return the cleaned name."""
    return re.sub(r'\([0-9]*\)', '', name)
class EGouSortParser(RootCatagoryPageParser):
    '''
    Parse the egou front page (http://www.egou.com/) and build the full set
    of level-1 category UrlSummary objects.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(EGouSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def _getBaseSort1UrlSums(self):
        # Walk the '_JD_ALLSORT' container; each 'item' div is one category.
        finalUrlList = []
        allSort = self.soup.find(name='div',attrs={'id':'_JD_ALLSORT'})
        for t in allSort.findAll(name='div',attrs={'class':'item'}):# level-1 category
            name,url = ParserUtils.parserTag_A(t.span.a)
            name = filterCatName(name)
            url = ''.join((mainHost,url))  # hrefs are site-relative
            sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=False)
            #sort_2 = t.find(attrs={'class':'subitem'})
            #for tt in sort_2(name='dt'):#二级分类
            #    name, url = ParserUtils.parserTag_A(tt.a)
            #    sort_2_urlsum = self.buildSort_N(url, name, sort_1_urlsum, isCrawle=True)
            #    finalUrlList.append(sort_2_urlsum)
            finalUrlList.append(sort_1_urlsum)
        return finalUrlList
    def parserSubUrlSums(self):
        '''Return the level-1 categories after include/exclude filtering.'''
        result = self._getBaseSort1UrlSums()
        return self.filterUrlList(result)
class EGouSort1PageParser(RootCatagoryPageParser):
    '''
    Parser for egou level-1 category pages: extracts the level-2 category
    links, including the overflow links hidden in the 'biPopLayer2' popup.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(EGouSort1PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def getNextSortUrlSums(self,firstFinalPage = False):
        '''Collect child-category UrlSummary objects from the page body.'''
        finalUrlList = []
        # Main sub-category box; the style attribute is part of the selector.
        rSort2 = self.soup.find(name='div',attrs={'class':'bi_mainBox_L_1_m_2_1','style':'padding-left:18px;'})
        for t in rSort2.findAll(name='div',attrs={'class':'cat_1'}):
            name,url = ParserUtils.parserTag_A(t.a)
            name = filterCatName(name)
            url = ''.join((mainHost,url))  # hrefs are site-relative
            sort_2_urlsum = self.buildSort_N(url, name, self.rootUrlSummary,firstFinalPage = firstFinalPage)
            finalUrlList.append(sort_2_urlsum)
        # Extra categories shown in the "more" popup layer, when present.
        rSort2_more = self.soup.find(name='div',attrs={'id':'biPopLayer2'})
        if rSort2_more:
            for t in rSort2_more(name='a'):
                name,url = ParserUtils.parserTag_A(t)
                name = filterCatName(name)
                url = ''.join((mainHost,url))
                sort_2_urlsum = self.buildSort_N(url, name, self.rootUrlSummary,firstFinalPage = firstFinalPage)
                finalUrlList.append(sort_2_urlsum)
        return finalUrlList
    def parserSubUrlSums(self):
        '''Return the child categories after include/exclude filtering.'''
        result = self.getNextSortUrlSums()
        return self.filterUrlList(result)
class EGouSort2PageParser(EGouSort1PageParser):
    '''
    Parser for egou level-2 category pages. Reuses the level-1 extraction
    but marks the children as first final pages (firstFinalPage=True), so
    each level-3 node becomes its own canonical parent.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(EGouSort2PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def parserSubUrlSums(self):
        result = self.getNextSortUrlSums(firstFinalPage = True)
        return self.filterUrlList(result)
# Dispatch table: category level -> parser class used by the spider driver.
parserDict = {0:EGouSortParser, 1:EGouSort1PageParser,2:EGouSort2PageParser}
''' test '''
# Ad-hoc manual tests (Python 2) driven from saved HTML fixtures in
# test_resources/; each prints results for visual inspection.
import os
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')
def testEgouSortPage():
    # Parse the saved front page, filtered to a single included category.
    fileName = os.path.join(testFilePath,'egouAllSort.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    telCat = ObuyUrlSummary(url='http://www.egou.com/browse07.01/',catagoryLevel=1,parentPath=[egouRoot])
    firstPage = EGouSortParser(content, egouRoot,include=(telCat,))
    for sort_1 in firstPage.parserSubUrlSums():
        for index, urlsum in enumerate(sort_1.parentPath):
            pass
        print sort_1.name,sort_1.url ,sort_1.catagoryLevel
def testEgouSort1Page():
    # Parse a saved level-1 page and print the discovered level-2 links.
    fileName = os.path.join(testFilePath,'browse08.3085_sort1.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    egSortUrl = ObuyUrlSummary(url='http://www.egou.com/browse08.3085',catagoryLevel=1)
    firstPage = EGouSort1PageParser(content, egouRoot)
    for sort_2 in firstPage.parserSubUrlSums():
        print sort_2.name,sort_2.url ,sort_2.catagoryLevel
def testEgouSort2Page():
    # Parse a saved level-2 page and print the discovered level-3 links.
    fileName = os.path.join(testFilePath,'browse07.01.01.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    egSortUrl = ObuyUrlSummary(url='http://www.egou.com/browse07.01.01',catagoryLevel=3)
    firstPage = EGouSort2PageParser(content, egSortUrl)
    for sort_2 in firstPage.parserSubUrlSums():
        print sort_2.name,sort_2.url ,sort_2.catagoryLevel
if __name__ == '__main__':
    #testRegx()
    #testDangDangAllSortPage()
    #testSort3Page()
    testEgouSort2Page()
    #testEgouSort1Page()
| [
[
8,
0,
0.0448,
0.0373,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0746,
0.0075,
0,
0.66,
0.0588,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.0821,
0.0075,
0,
0.66... | [
"'''\nCreated on 2011-11-09\n\n@author: zhongfeng\n'''",
"import re",
"from pageparser import *",
"egouRoot = ObuyUrlSummary(url=r'http://www.egou.com/', name='egou')",
"mainHost = 'http://www.egou.com'",
"def filterCatName(name):\n p = re.compile(r'\\([0-9]*\\)')\n return p.sub('',name)",
" p ... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from newegg.neweggpageparser import parserDict,newEggRoot
from spider import main
# Entry point: crawl newegg starting from the category-list root, dispatching
# pages to the parsers registered in parserDict.
if __name__ == '__main__':
    main(newEggRoot,parserDict)
[
8,
0,
0.4333,
0.4,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.7333,
0.0667,
0,
0.66,
0.3333,
638,
0,
2,
0,
0,
638,
0,
0
],
[
1,
0,
0.8,
0.0667,
0,
0.66,
... | [
"'''\nCreated on 2011-8-2\n\n@author: zhongfeng\n\n'''",
"from newegg.neweggpageparser import parserDict,newEggRoot",
"from spider import main",
"if __name__ == '__main__':\n main(newEggRoot,parserDict)",
" main(newEggRoot,parserDict)"
] |
#/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-7-27
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from pageparser import *
import re
from spiderconfigparser import SpiderConfig
from crawlerhttp import crawleRetries
from utils import Future
newEggRoot = ObuyUrlSummary(url=ur'http://www.newegg.com.cn/CategoryList.htm', name='newegg')
class NewEggAllSortParser(RootCatagoryPageParser):
    '''
    Parse http://www.newegg.com.cn/CategoryList.htm and build ObuyUrlSummary
    objects for every level-3 category.
    '''
    mainHost = r'http://www.newegg.com.cn'
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(NewEggAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def getBaseSort3UrlSums(self):
        '''Walk the three-level category tree and return all level-3 nodes.'''
        finalUrlList = []
        allSort = self.soup.find(name = 'div',attrs={'class':'allCateList'})
        for t in allSort.findAll(attrs={'id':re.compile('pd[0-9]+')}):# level-1 category
            name = t.getText()
            # Level-1 nodes have no page of their own; use an anchor URL.
            url = '#'.join((r'http://www.newegg.com.cn/CategoryList.htm',t['id']))
            sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=False)
            sort_2 = t.findNextSibling(name='dl')
            for tt in sort_2(name='dt'):# level-2 category
                name, url = ParserUtils.parserTag_A(tt.a)
                sort_2_urlsum = self.buildSort_N(url, name, sort_1_urlsum, isCrawle=False)
                for ttt in tt.findNextSibling(name='dd').findAll(name = 'a'):# level-3 category
                    name, url = ParserUtils.parserTag_A(ttt)
                    # Request 96 items per page to minimise pagination.
                    url = '?'.join((url,'pageSize=96'))
                    sort_3_urlsum = self.buildSort_N(url, name, sort_2_urlsum,firstFinalPage=True)
                    finalUrlList.append(sort_3_urlsum)
        return finalUrlList
class NewEggSort3PageParser(Sort3PageParser):
    '''
    Parser for newegg level-3 (listing) pages: discovers follow-up pages and
    extracts every product's details from the item grid.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(NewEggSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def nextPageUrlPattern(self):
        '''Return a format template for page N: ...base-{}.htm style URL.'''
        urlSegs = self.rootUrlSummary.url.rsplit('.', 1)
        pageSeg = '-{}'
        return '%s%s.%s' % (urlSegs[0], pageSeg, urlSegs[1])
    def getTotal(self):
        '''Read "current/total" from the pager and cap it at the configured
        maximum page count.'''
        pageSeg = self.soup.find(name='div',attrs={'class':'pageNav'}).find(name='ins').getText()
        totalPage = int(pageSeg.split('/')[-1])
        if totalPage > SpiderConfig.getMaxPage():
            totalPage = SpiderConfig.getMaxPage()
        return totalPage
    def getAdWords(self,prod,prodUrl):
        '''Return promo text for a product; when the listing advertises a
        cash-back or free-gift icon, the product detail page is fetched and
        parsed to obtain the actual promo details (a network round-trip).'''
        extraIconSeg = prod.find(name ='p',attrs={'class':'extraIcon'})
        adWords = ''
        if extraIconSeg:
            extraMsg = extraIconSeg.getText()
            if extraMsg.find(u'返现') != -1 or extraMsg.find(u'赠品') != -1:
                sort_5_urlsum = ObuyUrlSummary(url=prodUrl)
                result = crawleRetries(urlSum = sort_5_urlsum)
                parser = NewEggSortFinalParser(dataStr = result.content,rootUrlSummary=sort_5_urlsum)
                adWords = parser.parserPageInfos()
        return adWords
    def parserPageInfos(self):
        '''Extract a ProductDetails per item; promo lookup runs as a Future
        so the detail-page fetch overlaps the rest of the parsing.'''
        plist = self.soup.find(attrs={'id':'itemGrid1'})
        resultList = []
        if plist is None:
            return resultList
        for prod in plist.findAll(attrs={'class':'itemCell noSeller'}):
            pName,url = ParserUtils.parserTag_A(prod.find(name ='p',attrs={'class':'info'}).a)
            # Kick off the (possibly network-bound) promo lookup in the background.
            futureTask = Future(self.getAdWords, *(prod, url))
            #adWords = self.getAdWords(prod, url)
            # Product id is the numeric stem of the last URL path segment.
            pid = url.rsplit('/',1)[-1].split('.')[0]
            currentPrice = ParserUtils.getPrice(prod.find(attrs={'class':'current'}).strong.getText())
            bypastSeg = prod.find(attrs={'class':'bypast'})
            pastPrice = '0.00'
            if bypastSeg != None:
                pastPrice = ParserUtils.getPrice(bypastSeg.getText())
            # The last <img> under <dt> is the product photo.
            imgUrlSeg = prod.find(name='dt').findAll(name='img')[-1]
            imgUrl = imgUrlSeg['src']
            reputation = '0.0'
            evlNum = '0'
            # Note the trailing space in 'rank ' -- part of the class value.
            rankSeg = prod.find(name='dd',attrs={'class':'rank '})
            aSeg = None
            if rankSeg != None:
                aSeg = rankSeg.a
            if aSeg != None:
                reputation = ParserUtils.getDigit(aSeg['title'])
                evlNum = ParserUtils.getDigit(aSeg.getText())
            adWords = futureTask()  # block until the promo lookup finishes
            prodDetail = ProductDetails(productId=pid, fullUrl=url,imageUrl=imgUrl,privPrice = currentPrice,
                    pubPrice=pastPrice,name=pName, adWords=adWords,reputation=reputation,evaluateNum=evlNum)
            prodDetail.reputation = reputation
            prodDetail.evaluateNum = evlNum
            prodDetail.catagory = self.rootUrlSummary
            resultList.append(prodDetail)
        return resultList
class NewEggSort4PageParser(NewEggSort3PageParser):
    '''
    Level-4 category pages are plain listing pages: only product details are
    extracted, so sub-URL discovery is disabled (parserSubUrlSums is a no-op).
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(NewEggSort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def parserSubUrlSums(self):
        # Intentionally empty: no recursion below level 4.
        pass
class NewEggSortFinalParser(Parser):
    '''
    Parser for a newegg product-detail page; extracts promo information
    (cash-back amount and free-gift list) as a single 'cashback@gifts' string.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(NewEggSortFinalParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def parserPageInfos(self):
        '''Return '<cashback>@<gifts>' for this product page.'''
        crashCut = self.getCrashCut()
        exGift = self.getExtGift()
        return '@'.join((crashCut,exGift))
    def getCrashCut(self):
        '''Return the cash-back amount as a digit string ('0.00' if none).'''
        favInfoSeg = self.soup.find(name = 'ul',attrs={'class':'favourableInfo'})
        crashCut = '0.00'
        if favInfoSeg:
            for info in favInfoSeg(name = 'li'):
                # Only the entry whose label mentions cash-back counts.
                if info.label.getText().find(u'返现') != -1:
                    crashCutText = info.getText()
                    crashCut = ParserUtils.getDigit(crashCutText)
                    break
        return crashCut
    def getExtGift(self):
        '''Return the free-gift descriptions concatenated as 'N.text' items
        (0-based index; items are joined without a separator).'''
        exGiftSeg = self.soup.find(name = 'div',attrs={'class':'presentArea'})
        exGift = []
        if exGiftSeg:
            for index,info in enumerate(exGiftSeg(name = 'dd')):
                t = '%s.%s' % (index,info.getText())
                exGift.append(t)
        return ''.join(exGift)
# Dispatch table: category level -> parser class used by the spider driver.
parserDict = {0:NewEggAllSortParser, 3:NewEggSort3PageParser, 4:NewEggSort4PageParser}
''' test '''
# Ad-hoc manual tests (Python 2); some use saved fixtures in test_resources/,
# testSort3Details() hits the live site.
import os
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')
def testNewEggAllSortPage():
    # Parse the saved category list, filtered to one included category.
    # NOTE(review): the include URL below contains a doubled scheme
    # ('http://http://...') -- looks like a fixture typo.
    fileName = os.path.join(testFilePath,'CategoryList.htm')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    rootUrlSum = ObuyUrlSummary(url=r'http://www.newegg.com.cn/CategoryList.htm', name='newegg')
    include = [ ObuyUrlSummary(url = r'http://http://www.newegg.com.cn/Category/536.htm',
                       name='服务器',catagoryLevel = 2)]
    firstPage = NewEggAllSortParser(content, rootUrlSum,include = include)
    for sort_3 in firstPage.getBaseSort3UrlSums():
        for index, urlsum in enumerate(sort_3.parentPath):
            print '\t'*index,str(urlsum.getUrlSumAbstract())
        print sort_3.url ,sort_3.catagoryLevel
def testSort3Page():
    # Parse a saved listing page and print its follow-up page URLs.
    fileName = os.path.join(testFilePath,'newegg_2011-08-25_16-03-49.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://www.newegg.com.cn/SubCategory/1043.htm?pageSize=96',
                       parentPath=[('test')], catagoryLevel=3)
    sort3Page = NewEggSort3PageParser(content, sort_3_urlsum)
    for sort_4 in sort3Page.getSort4PageUrlSums():
        print sort_4.url
def testSort3Details():
    # Fetch a live listing page (retrying until HTTP 200) and print products.
    #fileName = os.path.join(testFilePath,'1043.htm')
    #with open(fileName, 'r') as fInput:
    #    content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://www.newegg.com.cn/SubCategory/970.htm?ep=1',
                       parentPath=[('test')], catagoryLevel=3)
    from crawlerhttp import crawle
    content = ''
    while True:
        result = crawle(sort_3_urlsum)
        if result.code == 200:
            content = result.content
            break
    sort3Page = NewEggSort3PageParser(content, sort_3_urlsum)
    for product in sort3Page.parserPageInfos():
        print product.logstr()
if __name__ == '__main__':
    #testNewEggAllSortPage()
    #testSort3Page()
    testSort3Details()
| [
[
8,
0,
0.0341,
0.0341,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0585,
0.0049,
0,
0.66,
0.0556,
488,
0,
1,
0,
0,
488,
0,
0
],
[
1,
0,
0.0634,
0.0049,
0,
0.66... | [
"'''\nCreated on 2011-7-27\n\n主要用于从网站上爬取信息后,抽取页面信息;\n\n@author: zhongfeng\n'''",
"from pageparser import *",
"import re",
"from spiderconfigparser import SpiderConfig",
"from crawlerhttp import crawleRetries",
"from utils import Future",
"class NewEggAllSortParser(RootCatagoryPageParser):\n '''\n ... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from newegg.neweggpageparser import parserDict,newEggRoot
from spider import main
# Entry point: crawl newegg starting from the category-list root, dispatching
# pages to the parsers registered in parserDict.
if __name__ == '__main__':
    main(newEggRoot,parserDict)
[
8,
0,
0.4333,
0.4,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.7333,
0.0667,
0,
0.66,
0.3333,
638,
0,
2,
0,
0,
638,
0,
0
],
[
1,
0,
0.8,
0.0667,
0,
0.66,
... | [
"'''\nCreated on 2011-8-2\n\n@author: zhongfeng\n\n'''",
"from newegg.neweggpageparser import parserDict,newEggRoot",
"from spider import main",
"if __name__ == '__main__':\n main(newEggRoot,parserDict)",
" main(newEggRoot,parserDict)"
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-7-27
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from BeautifulSoup import BeautifulSoup
from crawlerhttp import UrlSummary, CrawlerType, crawleRetries
from time import strftime
import chardet, re
from urlparse import urlparse
from threadpool import WorkRequest
from crawlerhttp import crawle
from cStringIO import StringIO
from itertools import chain
# Known page encodings per site; sites missing here are sniffed once with
# chardet in convertToUnicode() and the result is cached back into this dict.
encodingDict = {'360buy':'gb2312', 'newegg':'gb2312', 'dangdang':'gb2312', 'gome':'utf-8',
                'amazon':'utf-8', 'coo8':'gb2312', 'suning':'utf-8','egou':'GBK',}#'efeihu':'utf-8'}
def reinqueue_proc(req, result):
    '''Re-queue a failed price request once.

    req is the (urlsum, prodDetail, resultList, pool, captcha) tuple used by
    getProductPrice. A request whose stat is still 0 gets one retry (its stat
    is stamped with the failing HTTP code first); an already-retried request
    is only reported.
    '''
    urlsum = req[0]
    pool = req[3]
    if urlsum.stat == 0:
        urlsum.stat = result.code  # mark as retried so we do not loop forever
        req = WorkRequest(getProductPrice, req, None,
                  callback=None)
        pool.putRequest(req)
    else:
        print "Failed %s:%d" % (urlsum.url, result.code)
def getProductPrice(*req):
    """Fetch the price image named by req[0] (with retries) and hand the
    response to proc_normal_result for decoding/bookkeeping; returns the
    raw crawl result."""
    crawl_result = crawleRetries(req[0])
    proc_normal_result(req, crawl_result)
    return crawl_result
def proc_normal_result(req, result):
    """On HTTP 200, decode the price image with the captcha reader in req[4],
    store it on the product detail (req[1]) and append the detail to the
    shared result list (req[2]); otherwise hand the request back for
    re-queueing."""
    if result.code != 200:
        reinqueue_proc(req, result)
        return
    detail = req[1]
    price_reader = req[4]
    detail.privPrice = price_reader(StringIO(result.content))
    req[2].append(detail)
class ObuyUrlSummary(UrlSummary):
    '''
    URL abstraction for one crawl target: a category (or product) node with
    its position in the category tree and its crawl bookkeeping state.
    '''
    def __init__(self, url='', data=None, headers=None, crawlerType=CrawlerType.GET_URL, name='',
            isCrawle=True, isRecursed=True, catagoryLevel=0, retries = 4, parentPath=None,parent = None,
            stat=0, errReason='', include=None, exclude=None):
        super(ObuyUrlSummary, self).__init__(url, data, headers, crawlerType,retries)
        self.name = name                   # category name
        self.catagoryLevel = catagoryLevel # depth in the category tree
        self.parentPath = [] if parentPath is None else parentPath  # ancestor chain
        self.parent = parent               # canonical parent node
        self.isCrawle = isCrawle           # whether this URL should be fetched
        self.isRecursed = isRecursed       # whether to recurse into children
        self.stat = stat                   # final crawl status (0 = not yet)
        self.errReason = errReason         # failure description, if any
        # BUG FIX: these were hard-coded to None, silently discarding the
        # include/exclude arguments passed by callers.
        self.include = include  # sub-URLs that must be kept
        self.exclude = exclude  # sub-URLs to drop (include wins when both set)
    def getUrlSumAbstract(self):
        '''Return the compact (name, url, level) summary of this node.'''
        return self.name, self.url, self.catagoryLevel
    def __str__(self):
        return str(vars(self))
    __repr__ = __str__
class ParserResult(object):
    # Base type for parser outputs; subclasses override logstr() to render
    # themselves as a single pipe-delimited log line.
    def logstr(self):
        '''Return this result as one log line (overridden by subclasses).'''
        pass
def convertToUnicode(dataStr, siteName):
    """Decode a raw byte string to unicode using the site's known encoding.

    Unknown sites are sniffed once with chardet and the detected encoding is
    cached in encodingDict; non-byte-string input is returned untouched.
    """
    if not isinstance(dataStr, str):
        return dataStr
    encoding = encodingDict.get(siteName)
    if encoding is None:
        encoding = chardet.detect(dataStr)['encoding']
        encodingDict[siteName] = encoding
    return dataStr.decode(encoding, 'ignore')
class Parser(object):
    '''Base page parser: decodes the raw page, builds the soup, and provides
    include/exclude filtering of discovered sub-URLs.'''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        self.rootUrlSummary = rootUrlSummary
        self.include = include
        self.exclude = exclude
        # Site key is the second hostname label, e.g. 'dangdang' from
        # 'category.dangdang.com'.
        siteName = urlparse(rootUrlSummary.url).hostname.split('.')[1]
        self.dataStr = convertToUnicode(dataStr, siteName)
        self.soup = BeautifulSoup(self.dataStr, convertEntities=BeautifulSoup.HTML_ENTITIES) # BeautifulSoup is the default parser
    @staticmethod
    def compareUrlSumm(urla, urlb):
        '''Compare two UrlSummary objects: by url when urla has one,
        otherwise by name; empty/None on both sides compares False.'''
        if urla.url != None and len(urla.url) > 0:
            return urla.url == urlb.url
        elif urla.name != None and len(urla.name) > 0:
            return urla.name == urlb.name
        else:
            return False
    @staticmethod
    def urlSummContain(filterArr, finalUrlSum):
        '''True when finalUrlSum (or any of its ancestors in parentPath)
        matches an entry of filterArr.'''
        #print finalUrlSum.name,finalUrlSum.url
        for urlsumm in filterArr:
            #print urlsumm.name,urlsumm.url
            if Parser.compareUrlSumm(urlsumm, finalUrlSum):
                return True
            else:
                for parent in finalUrlSum.parentPath:
                    #print parent.name,parent.url
                    if Parser.compareUrlSumm(urlsumm, parent):
                        return True
        return False
    def filterUrlList(self, finalUrlList):
        '''Apply the include list (keep matches) or, failing that, the
        exclude list (drop matches); include takes precedence.'''
        filterResult = finalUrlList
        if self.include != None and len(self.include) > 0:
            filterResult = [ finalUrlSum for finalUrlSum in finalUrlList
                    if Parser.urlSummContain(self.include, finalUrlSum)]
        elif self.exclude != None and len(self.exclude) > 0:
            filterResult = [ finalUrlSum for finalUrlSum in finalUrlList
                    if not Parser.urlSummContain(self.exclude, finalUrlSum)]
        return filterResult
    def parserPageInfos(self):
        '''
        Return the list of ParserResult objects for this page (overridden).
        '''
        pass
    def parserSubUrlSums(self):
        '''Return the child UrlSummary objects for this page (overridden).'''
        pass
def getParser(level, parserDict):
    """Look up the parser class registered for *level*; None when absent."""
    return parserDict.get(level)
class ParserUtils(object):
    '''
    Small HTML/text extraction helpers shared by the site parsers.
    '''
    @staticmethod
    def parserTag_A(a):
        '''Return (text, href) of an anchor tag, both stripped.'''
        return a.getText().strip(), a['href'].strip()
    @staticmethod
    def getPrice(sPrice):
        '''Extract a price figure, e.g. u'¥4,899.00' -> '4899.00'.

        Thousands separators are removed first; returns '0.00' when the
        input is empty/None or contains no digits.
        '''
        if not sPrice:
            return '0.00'
        sPrice = sPrice.replace(u',', '')
        # BUG FIX: the decimal point must be escaped -- the old pattern
        # u'[0-9]+.[0-9]+' let '.' match any character, so '1a2' came back
        # as '1a2'. An integer alternative keeps whole-number prices working.
        ret = re.search(u'[0-9]+\\.[0-9]+|[0-9]+', sPrice)
        if ret is None:
            return '0.00'
        return ret.group()
    @staticmethod
    def getDigit(s):
        '''Extract the first number (decimal or integer) from *s* as a
        string; returns the int 0 when no digits are present (historical
        mixed return type kept for compatibility).'''
        s = s.replace(u',', '')
        # BUG FIX: escape the decimal point (see getPrice).
        sd = re.search(u'[0-9]+\\.[0-9]+|[0-9]+', s)
        if sd is None:
            return 0
        return sd.group()
    @staticmethod
    def getImgUrl(imgTag):
        '''Return the src of the <img> inside *imgTag* ('' when tag is None).'''
        if imgTag is None:
            return ''
        return imgTag.img['src']
class RootCatagoryPageParser(Parser):
    '''
    Base class for site-root category parsers: builds the tree of category
    UrlSummary nodes and returns all level-3 categories.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(RootCatagoryPageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def buildSort_N(self, url, name, parent, isCrawle=True,firstFinalPage = False):
        '''
        Build one category node under *parent*: level is parent's level + 1
        and the ancestor chain is parent's chain plus parent itself. When
        firstFinalPage is set the node becomes its own canonical parent
        (it is the first page of a final listing series).
        '''
        sort_n_urlsum = ObuyUrlSummary(url=url, name=name, isCrawle=isCrawle)
        sort_n_urlsum.parentPath = []
        sort_n_urlsum.catagoryLevel = parent.catagoryLevel + 1
        sort_n_urlsum.parentPath.extend(parent.parentPath)
        sort_n_urlsum.parentPath.append(parent)
        if firstFinalPage:
            sort_n_urlsum.parent = sort_n_urlsum
        else:
            sort_n_urlsum.parent = parent
        return sort_n_urlsum
    def getBaseSort3UrlSums(self):
        '''Return all level-3 category nodes (overridden per site).'''
        pass
    def parserSubUrlSums(self):
        '''Return the level-3 categories after include/exclude filtering.'''
        result = self.getBaseSort3UrlSums()
        return self.filterUrlList(result)
class Sort3PageParser(Parser):
    '''
    Base class for level-3 (listing) page parsers. Responsibilities:
    a. discover the UrlSummary of every follow-up page of this category;
    b. extract the details of every product on the page.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(Sort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def buildSort_4(self, url):
        '''Build a level-4 node for one follow-up page, inheriting this
        category's name, ancestor chain and canonical parent.'''
        sort4_urlsum = ObuyUrlSummary(url=url, name=self.rootUrlSummary.name,
                        catagoryLevel=4)
        sort4_urlsum.parentPath = []
        sort4_urlsum.parentPath.extend(self.rootUrlSummary.parentPath)
        sort4_urlsum.parentPath.append(self.rootUrlSummary)
        sort4_urlsum.parent = self.rootUrlSummary.parent
        return sort4_urlsum
    def getTotal(self):
        '''Return the total page count (overridden per site).'''
        pass
    def nextPageUrlPattern(self):
        '''Return a str.format template for page-N URLs (overridden).'''
        pass
    def buildSort_4UrlSums(self):
        '''Build level-4 nodes for pages 2..total (page 1 is this page).'''
        finalUrlList = []
        totalPage = self.getTotal()
        if totalPage > 1:
            for pageNum in range(2, totalPage + 1):
                url = self.nextPageUrlPattern().format(str(pageNum))
                finalUrlList.append(self.buildSort_4(url))
        return finalUrlList
    def getSort4PageUrlSums(self):
        return self.buildSort_4UrlSums()
    def parserSubUrlSums(self):
        '''Return the follow-up pages after include/exclude filtering.'''
        result = self.getSort4PageUrlSums()
        return self.filterUrlList(result)
def seEncode(ustr, encoding='gb18030'):
    '''
    Encode *ustr* to a byte string (default gb18030, unencodable characters
    dropped); None becomes '' and non-unicode values are passed through str().
    '''
    if ustr is None:
        return ''
    if not isinstance(ustr, unicode):
        return str(ustr)
    return ustr.encode(encoding, 'ignore')
class ProductDetails(ParserResult):
    '''
    Detailed information for one product extracted from a listing page.
    '''
    def __init__(self, name='', imageUrl='', productId='', catagory=None, fullUrl='', pubPrice='0.00',
                 privPrice='0.00', adWords='', reputation='0', evaluateNum='0', updateTime=None):
        self.name = name                # product title
        self.imageUrl = imageUrl        # URL of the product image
        self.productId = productId      # product id on the source site
        self.catagory = catagory        # category (UrlSummary) the product belongs to
        self.fullUrl = fullUrl          # original product page URL
        self.pubPrice = pubPrice        # listed/original price
        self.privPrice = privPrice      # selling price, before ad discounts
        self.adWords = adWords          # promotion text (discounts, coupons, ...)
        self.reputation = reputation    # positive-feedback rating
        self.evaluateNum = evaluateNum  # number of reviews
        # timestamp of this record; defaults to "now"
        self.updateTime = updateTime if updateTime is not None else strftime("%Y-%m-%d %H:%M:%S")
    def __getCatagoryAbs(self):
        # Abbreviated category reference: "(url, level)" of the parent node,
        # or '' when the parent is not an ObuyUrlSummary.
        cat = self.catagory.parent
        if not isinstance(cat, ObuyUrlSummary):
            return ''
        return str((seEncode(cat.url), cat.catagoryLevel))
    def __filterStr(self, s):
        # Encode, strip the '|' field separator and collapse whitespace.
        return ' '.join(seEncode(s).replace('|', ' ').split())
    def logstr(self):
        '''Render the product as one '|'-separated log line.'''
        fields = (self.productId, self.privPrice, self.updateTime, self.name,
                  self.evaluateNum, self.reputation, self.adWords,
                  self.fullUrl, self.imageUrl, self.__getCatagoryAbs())
        return '|'.join(self.__filterStr(f) for f in fields)
    def __str__(self):
        return str(vars(self))
    __repr__ = __str__
| [
[
8,
0,
0.0236,
0.0236,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0404,
0.0034,
0,
0.66,
0.0435,
878,
0,
1,
0,
0,
878,
0,
0
],
[
1,
0,
0.0438,
0.0034,
0,
0.66... | [
"'''\nCreated on 2011-7-27\n\n主要用于从网站上爬取信息后,抽取页面信息;\n\n@author: zhongfeng\n'''",
"from BeautifulSoup import BeautifulSoup",
"from crawlerhttp import UrlSummary, CrawlerType, crawleRetries",
"from time import strftime",
"import chardet, re",
"from urlparse import urlparse",
"from threadpool import WorkRe... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-16
@author: zhongfeng
'''
COO8_FEATURES_MAP = {
'''
__
__
__
__
__
__
__
__
__
__
##
#_
'''
:
'.',
'''
__###___
_#_#_##_
_##__##_
##____##
#_____#_
##____##
#_____#_
##____##
#_____#_
_##__#_#
_##_###_
__##_#__
'''
:
'0',
'''
___##
__#_#
_###_
#__##
#__#_
___##
___#_
___##
___#_
___##
___#_
___##
'''
:
'1',
'''
__###___
_#_#_##_
##____##
#_____#_
______##
_____#__
____###_
___#_#__
__##____
_#______
#######_
#_#_#_##
'''
:
'2',
'''
__###_#_
_#_#_###
##____#_
______##
___##_#_
___#_##_
______##
______#_
##____##
#_#___#_
_######_
___#_#__
'''
:
'3',
'''
_____##_
____#_#_
____##__
___#_##_
__##_#__
__#__##_
_#___#__
##___##_
#_###_##
##_#_##_
_____#__
_____##_
'''
:
'4',
'''
_____##_
____#_#_
____##__
___#_##_
__##_#__
___#_##_
_##__#__
#____##_
####_#_#
#_#_####
_____#__
_____##_
'''
:
'4',
'''
_###_##_
_#_###__
_#______
_#______
###_##__
#_###_#_
##____##
______#_
##____##
#_#___#_
_######_
___#_#__
'''
:
'5',
'''
__###_#_
_#_#_###
_##___#_
#_______
##_###__
#_#_#_#_
##____##
#_____#_
##____##
_##___#_
_#_##_#_
__#_##__
'''
:
'6',
'''
###_####
#_###_#_
_____##_
____#___
____##__
___#____
___##___
___#____
___#____
__##____
__#_____
__##____
'''
:
'7',
'''
__####__
_#_#_##_
##____##
#______#
##____##
_#####__
__#_#_#_
##____##
#_____#_
##____##
_####_#_
___#_#__
'''
:
'8',
'''
__###___
_#_#_##_
##___##_
#_____##
##_____#
#_____##
_#####_#
__#_#_##
______#_
##___#__
#_##_##_
_##_##__
'''
:
'9',
'''
__###___
_#_#_##_
##___##_
#_____##
##_____#
#_____##
_#####_#
__#_#_##
______#_
##___#__
#_##_##_
_#_###__
'''
:
'9',
} | [
[
8,
0,
0.0258,
0.0215,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.5236,
0.9571,
0,
0.66,
1,
864,
0,
0,
0,
0,
0,
6,
0
]
] | [
"'''\nCreated on 2011-8-16\n\n@author: zhongfeng\n'''",
"COO8_FEATURES_MAP = {\n '''\n__\n__\n__\n__\n__\n__"
] |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-16
@author: zhongfeng
'''
COO8_FEATURES_MAP = {
'''
__
__
__
__
__
__
__
__
__
##
#_
##
'''
:
'.',
'''
__###_#__
_#____##_
_#_____#_
##_____##
#______#_
##_____##
#______#_
##_____##
#______#_
_#_____##
_##___#__
__###_#__
'''
:
'0',
'''
__###_#__
_#____##_
#______#_
##_____##
#______#_
##_____##
#______#_
##_____##
#______#_
_#_____##
_##___#__
__###_#__
'''
:
'0',
'''
__##__
__#___
#_##__
__#___
__##__
__#___
__##__
__#___
__##__
__#___
__##__
##_###
'''
:
'1',
'''
_####_#__
#_____##_
##_____#_
_______##
_______#_
______#__
_____##__
____#____
___#_#___
__##_____
_#_______
#_#######
'''
:
'2',
'''
###_###__
#_____#__
##_____##
_______#_
______##_
___##_#__
______##_
_______##
________#
##_____##
#_____#__
_#####___
'''
:
'3',
'''
______##__
_____#_#__
____#_##__
___#__#___
__#___##__
_#____#___
#_____##__
#####_#_##
______##__
______#___
______##__
______#___
'''
:
'4',
'''
_###_####
_#_______
_##______
_#_______
_######__
______#__
_______##
_______#_
_______##
##_____#_
#_____#__
_####_#__
'''
:
'5',
'''
___###_#_
__#______
_##______
#________
##_####__
#_____#__
##_____##
#______#_
##_____##
#______#_
_##___#__
__###_#__
'''
:
'6',
'''
###_####_
_______##
______#__
______##_
_____#___
_____##__
____#____
____##___
___#_____
___##____
__#______
_###_____
'''
:
'7',
'''
__###_#__
_#____##_
##_____##
#_______#
_##___##_
__###_#__
_#____##_
##_____#_
#______##
##_____#_
_#____#__
__####___
'''
:
'8',
'''
__###_#__
_#____##_
##_____#_
#______##
##_____#_
#______##
_##____#_
__###__##
_______#_
______##_
_____#___
_###_#___
'''
:
'9',
} | [
[
8,
0,
0.0278,
0.0231,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.5255,
0.9537,
0,
0.66,
1,
864,
0,
0,
0,
0,
0,
6,
0
]
] | [
"'''\nCreated on 2011-8-16\n\n@author: zhongfeng\n'''",
"COO8_FEATURES_MAP = {\n '''\n__\n__\n__\n__\n__\n__"
] |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-7-26
京东价格图片识别模块
@author: zhongfeng
'''
import ImageFilter, ImageChops
from captcha_price import *
from coo8.coo8_feature2 import COO8_FEATURES_MAP
import Image
import itertools
import re
import time
try:
import psyco
psyco.full()
except ImportError:
pass
class CaptchaProfile_coo8(CaptchaProfile):
def __init__(self,features_map = COO8_FEATURES_MAP):
super(CaptchaProfile_coo8,self).__init__(features_map)
def __new__(cls,features_map = COO8_FEATURES_MAP):
return super(CaptchaProfile_coo8, cls).__new__(cls,features_map)
def split(self, im,top = 4,bottom = 16):
return super(CaptchaProfile_coo8,self).split(im,top,bottom)
def captcha_coo8(filename):
return captcha(filename, CaptchaProfile_coo8())
def test():
print CaptchaProfile_coo8(r'c:\gp359329,2.png')
import os
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')
if __name__ == '__main__':
fileName = os.path.join(testFilePath, "125487,1.png")
print captcha_coo8(fileName)
# it1 = im.crop((3, 4, 13, 16))
# print cia.GetBinaryMap(it1),'\n'
# it2 = im.crop((15,4,24,16))
# print cia.GetBinaryMap(it2)
# print '+++++++++'
# it2 = im.crop((25, 4, 34, 16))
# it3 = im.crop ((36,4,45,16))
# #it3 = im.crop((35, 4, 37, 16))
# it4 = im.crop((38, 4, 47, 16))
# it5 = im.crop((48, 4, 57, 16))
# #it6 = im.crop((51, 3, 57, 11))
# #it7 = im.crop((59, 3, 65, 11))
# multilist = [[0 for col in range(5)] for row in range(3)]
# print '\n'.join(( str(t) for t in multilist))
#profile = CaptchaProfile_360Buy()
#print captcha_360buy(r'c:\6.png')
| [
[
8,
0,
0.1045,
0.1045,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1791,
0.0149,
0,
0.66,
0.0667,
739,
0,
2,
0,
0,
739,
0,
0
],
[
1,
0,
0.194,
0.0149,
0,
0.66,... | [
"'''\nCreated on 2011-7-26\n\n京东价格图片识别模块\n\n@author: zhongfeng\n'''",
"import ImageFilter, ImageChops",
"from captcha_price import *",
"from coo8.coo8_feature2 import COO8_FEATURES_MAP",
"import Image",
"import itertools",
"import re",
"import time",
"try:\n import psyco\n psyco.full()\nexcept... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-7-26
京东价格图片识别模块
@author: zhongfeng
'''
import ImageFilter, ImageChops
from captcha_price import *
from coo8.coo8_feature2 import COO8_FEATURES_MAP
import Image
import itertools
import re
import time
try:
import psyco
psyco.full()
except ImportError:
pass
class CaptchaProfile_coo8(CaptchaProfile):
def __init__(self,features_map = COO8_FEATURES_MAP):
super(CaptchaProfile_coo8,self).__init__(features_map)
def __new__(cls,features_map = COO8_FEATURES_MAP):
return super(CaptchaProfile_coo8, cls).__new__(cls,features_map)
def split(self, im,top = 4,bottom = 16):
return super(CaptchaProfile_coo8,self).split(im,top,bottom)
def captcha_coo8(filename):
return captcha(filename, CaptchaProfile_coo8())
def test():
print CaptchaProfile_coo8(r'c:\gp359329,2.png')
import os
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')
if __name__ == '__main__':
fileName = os.path.join(testFilePath, "125487,1.png")
print captcha_coo8(fileName)
# it1 = im.crop((3, 4, 13, 16))
# print cia.GetBinaryMap(it1),'\n'
# it2 = im.crop((15,4,24,16))
# print cia.GetBinaryMap(it2)
# print '+++++++++'
# it2 = im.crop((25, 4, 34, 16))
# it3 = im.crop ((36,4,45,16))
# #it3 = im.crop((35, 4, 37, 16))
# it4 = im.crop((38, 4, 47, 16))
# it5 = im.crop((48, 4, 57, 16))
# #it6 = im.crop((51, 3, 57, 11))
# #it7 = im.crop((59, 3, 65, 11))
# multilist = [[0 for col in range(5)] for row in range(3)]
# print '\n'.join(( str(t) for t in multilist))
#profile = CaptchaProfile_360Buy()
#print captcha_360buy(r'c:\6.png')
| [
[
8,
0,
0.1045,
0.1045,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1791,
0.0149,
0,
0.66,
0.0667,
739,
0,
2,
0,
0,
739,
0,
0
],
[
1,
0,
0.194,
0.0149,
0,
0.66,... | [
"'''\nCreated on 2011-7-26\n\n京东价格图片识别模块\n\n@author: zhongfeng\n'''",
"import ImageFilter, ImageChops",
"from captcha_price import *",
"from coo8.coo8_feature2 import COO8_FEATURES_MAP",
"import Image",
"import itertools",
"import re",
"import time",
"try:\n import psyco\n psyco.full()\nexcept... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-1
@author: zhongfeng
'''
from coo8.coo8pageparser import parserDict,coo8Root
from spider import main
if __name__ == '__main__':
main(coo8Root,parserDict)
| [
[
8,
0,
0.4062,
0.375,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.6875,
0.0625,
0,
0.66,
0.3333,
430,
0,
2,
0,
0,
430,
0,
0
],
[
1,
0,
0.75,
0.0625,
0,
0.66,
... | [
"'''\nCreated on 2011-8-1\n\n@author: zhongfeng\n\n'''",
"from coo8.coo8pageparser import parserDict,coo8Root",
"from spider import main",
"if __name__ == '__main__':\n main(coo8Root,parserDict)",
" main(coo8Root,parserDict)"
] |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-16
@author: zhongfeng
'''
COO8_FEATURES_MAP = {
'''
__
__
__
__
__
__
__
__
__
##
#_
##
'''
:
'.',
'''
__###_#__
_#____##_
_#_____#_
##_____##
#______#_
##_____##
#______#_
##_____##
#______#_
_#_____##
_##___#__
__###_#__
'''
:
'0',
'''
__###_#__
_#____##_
#______#_
##_____##
#______#_
##_____##
#______#_
##_____##
#______#_
_#_____##
_##___#__
__###_#__
'''
:
'0',
'''
__##__
__#___
#_##__
__#___
__##__
__#___
__##__
__#___
__##__
__#___
__##__
##_###
'''
:
'1',
'''
_####_#__
#_____##_
##_____#_
_______##
_______#_
______#__
_____##__
____#____
___#_#___
__##_____
_#_______
#_#######
'''
:
'2',
'''
###_###__
#_____#__
##_____##
_______#_
______##_
___##_#__
______##_
_______##
________#
##_____##
#_____#__
_#####___
'''
:
'3',
'''
______##__
_____#_#__
____#_##__
___#__#___
__#___##__
_#____#___
#_____##__
#####_#_##
______##__
______#___
______##__
______#___
'''
:
'4',
'''
_###_####
_#_______
_##______
_#_______
_######__
______#__
_______##
_______#_
_______##
##_____#_
#_____#__
_####_#__
'''
:
'5',
'''
___###_#_
__#______
_##______
#________
##_####__
#_____#__
##_____##
#______#_
##_____##
#______#_
_##___#__
__###_#__
'''
:
'6',
'''
###_####_
_______##
______#__
______##_
_____#___
_____##__
____#____
____##___
___#_____
___##____
__#______
_###_____
'''
:
'7',
'''
__###_#__
_#____##_
##_____##
#_______#
_##___##_
__###_#__
_#____##_
##_____#_
#______##
##_____#_
_#____#__
__####___
'''
:
'8',
'''
__###_#__
_#____##_
##_____#_
#______##
##_____#_
#______##
_##____#_
__###__##
_______#_
______##_
_____#___
_###_#___
'''
:
'9',
} | [
[
8,
0,
0.0278,
0.0231,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.5255,
0.9537,
0,
0.66,
1,
864,
0,
0,
0,
0,
0,
6,
0
]
] | [
"'''\nCreated on 2011-8-16\n\n@author: zhongfeng\n'''",
"COO8_FEATURES_MAP = {\n '''\n__\n__\n__\n__\n__\n__"
] |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-16
@author: zhongfeng
'''
COO8_FEATURES_MAP = {
'''
__
__
__
__
__
__
__
__
__
__
##
#_
'''
:
'.',
'''
__###___
_#_#_##_
_##__##_
##____##
#_____#_
##____##
#_____#_
##____##
#_____#_
_##__#_#
_##_###_
__##_#__
'''
:
'0',
'''
___##
__#_#
_###_
#__##
#__#_
___##
___#_
___##
___#_
___##
___#_
___##
'''
:
'1',
'''
__###___
_#_#_##_
##____##
#_____#_
______##
_____#__
____###_
___#_#__
__##____
_#______
#######_
#_#_#_##
'''
:
'2',
'''
__###_#_
_#_#_###
##____#_
______##
___##_#_
___#_##_
______##
______#_
##____##
#_#___#_
_######_
___#_#__
'''
:
'3',
'''
_____##_
____#_#_
____##__
___#_##_
__##_#__
__#__##_
_#___#__
##___##_
#_###_##
##_#_##_
_____#__
_____##_
'''
:
'4',
'''
_____##_
____#_#_
____##__
___#_##_
__##_#__
___#_##_
_##__#__
#____##_
####_#_#
#_#_####
_____#__
_____##_
'''
:
'4',
'''
_###_##_
_#_###__
_#______
_#______
###_##__
#_###_#_
##____##
______#_
##____##
#_#___#_
_######_
___#_#__
'''
:
'5',
'''
__###_#_
_#_#_###
_##___#_
#_______
##_###__
#_#_#_#_
##____##
#_____#_
##____##
_##___#_
_#_##_#_
__#_##__
'''
:
'6',
'''
###_####
#_###_#_
_____##_
____#___
____##__
___#____
___##___
___#____
___#____
__##____
__#_____
__##____
'''
:
'7',
'''
__####__
_#_#_##_
##____##
#______#
##____##
_#####__
__#_#_#_
##____##
#_____#_
##____##
_####_#_
___#_#__
'''
:
'8',
'''
__###___
_#_#_##_
##___##_
#_____##
##_____#
#_____##
_#####_#
__#_#_##
______#_
##___#__
#_##_##_
_##_##__
'''
:
'9',
'''
__###___
_#_#_##_
##___##_
#_____##
##_____#
#_____##
_#####_#
__#_#_##
______#_
##___#__
#_##_##_
_#_###__
'''
:
'9',
} | [
[
8,
0,
0.0258,
0.0215,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.5236,
0.9571,
0,
0.66,
1,
864,
0,
0,
0,
0,
0,
6,
0
]
] | [
"'''\nCreated on 2011-8-16\n\n@author: zhongfeng\n'''",
"COO8_FEATURES_MAP = {\n '''\n__\n__\n__\n__\n__\n__"
] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-8-7
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from cStringIO import StringIO
from coo8.image_price import captcha_coo8
from crawlerhttp import crawleRetries
from pageparser import *
from threadpool import ThreadPool, WorkRequest
import json
import os
import re
import threading
import urllib
from spiderconfigparser import SpiderConfig
coo8Root = ObuyUrlSummary(url=r'http://www.coo8.com/allcatalog/', name='coo8',
isRecursed=True, catagoryLevel=0)
class Coo8AllSortParser(RootCatagoryPageParser):
'''
从http://www.coo8.com/allcatalog/获取所有的分类信息,
组合成ObuyUrlSummary
'''
mainHost = r'http://www.coo8.com/'
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(Coo8AllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def getBaseSort3UrlSums(self):
finalUrlList = []
allSort = self.soup.find(name='div', attrs={'class':'cateItems'})
for t in allSort.findAll(name='div', attrs={'class':re.compile('hd.*')}):#一级分类
sort_1 = t.find(name='h2')
name = sort_1['id']
url = ''.join((self.mainHost,name,'/'))
sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=False)
sort_2 = t.findNextSibling(name='div',attrs={'class':re.compile('bd.*')})
for tt in sort_2(name='dl'):#二级分类
name = tt.dt.h3.getText()
url = ''.join((self.mainHost, sort_1_urlsum.name, name))
sort_2_urlsum = self.buildSort_N(url, name, sort_1_urlsum, isCrawle=False)
for ttt in tt(name='dd'):#三级分类
try:
name, url = ParserUtils.parserTag_A(ttt.a)
except Exception:
continue
sort_3_urlsum = self.buildSort_N(url, name, sort_2_urlsum,firstFinalPage=True)
finalUrlList.append(sort_3_urlsum)
return finalUrlList
class Coo8Sort3PageParser(Sort3PageParser):
'''
三级页面解析类
'''
pricePageNum = 8
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(Coo8Sort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def nextPageUrlPattern(self):
urlSegs = self.rootUrlSummary.url.rsplit('.', 1)
pageSeg = '-0-0-0-{}-0-101101'
return '%s%s.%s' % (urlSegs[0].replace('-0-0-0-0',''), pageSeg, urlSegs[1])
def getTotal(self):
regx = u'共([0-9]*)页'
p = re.compile(regx)
pageSeg = self.soup.find(name='div', attrs={'class':'pageInfo'})
if pageSeg is None:
return 1
pageNum = pageSeg.getText()
totalNum = int(p.search(pageNum).group(1))
if totalNum > SpiderConfig.getMaxPage():
totalNum = SpiderConfig.getMaxPage()
return totalNum
def getAdWords(self,prod,prodUrl):
extraIconSeg = prod.find(name ='p',attrs={'class':'text-tag-wrap'})
adWords = ''
if extraIconSeg:
extraMsg = extraIconSeg.getText()
if extraMsg.find(u'返现') != -1 or extraMsg.find(u'赠品') != -1 \
or extraMsg.find(u'返券') != -1 :
sort_5_urlsum = ObuyUrlSummary(url=prodUrl)
result = crawleRetries(urlSum = sort_5_urlsum)
parser = Coo8SortFinalParser(dataStr = result.content,rootUrlSummary=sort_5_urlsum)
adWords = parser.parserPageInfos()
return adWords
def parserPageInfos(self):
resultList = []
plist = self.soup.find(name='div', attrs={'class':'srchContent'})
if plist is None:
#raise Exception("Page Error")
return resultList
try:
pool = ThreadPool(self.pricePageNum)
for li in plist(name='li'):
pNameSeg = li.find(name='p', attrs={'class':'name'}).a
pName = pNameSeg['title']
imgUrlSeg = li.find(name='p',attrs={'class':'pic'}).img
imgUrl = ''
if imgUrlSeg:
imgUrl = imgUrlSeg['src']
pid = pNameSeg['href'].rsplit('/')[-1].split('.')[0]
url = pNameSeg['href']
if url and not url.startswith('http'):
url = ''.join((r'http://www.coo8.com',pNameSeg['href']))
adWords = self.getAdWords(prod = li,prodUrl=url)
priceImgUrl = li.find(name='p', attrs={'class':'price'}).img['src']
prodDetail = ProductDetails(fullUrl=url,productId=pid, adWords=adWords, name=pName,imageUrl=imgUrl)
prodDetail.catagory = self.rootUrlSummary
pimgUrlSumm = ObuyUrlSummary(url = priceImgUrl)
req = WorkRequest(getProductPrice, [pimgUrlSumm, prodDetail, resultList, pool,captcha_coo8], None,
callback=None)
pool.putRequest(req)
pool.wait()
except Exception,e:
raise e
finally:
pool.dismissWorkers(num_workers=self.pricePageNum)
return resultList
class Coo8Sort4PageParser(Coo8Sort3PageParser):
'''
分类四级页面为列表页面,只抽取Product信息
'''
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(Coo8Sort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def parserSubUrlSums(self):
pass
class Coo8SortFinalParser(Parser):
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(Coo8SortFinalParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def parserPageInfos(self):
crashCut = self.getCrashCut()
exGift = self.getCouponAndExGift()
return '@'.join((crashCut,exGift))
def getCrashCut(self):
crashCutSeg = self.soup.find(name = 'span',attrs={'class':'D-fanxian'})
crashCutText = ''
if crashCutSeg:
crashCutText = crashCutSeg.getText()
return crashCutText
def getCouponAndExGift(self):
adSeg = self.soup.find(name = 'dl',attrs = {'id':'zengpin'})
ret = ''
if adSeg:
ret = adSeg.getText()
return ret
parserDict = {0:Coo8AllSortParser, 3:Coo8Sort3PageParser, 4:Coo8Sort4PageParser}
''' test '''
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')
def testAllSortPage():
fileName = os.path.join(testFilePath, 'coo8_2011-11-07_21-02-49.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
rootUrlSum = ObuyUrlSummary(url=r'http://www.coo8.com/allcatalog/', name='coo8')
firstPage = Coo8AllSortParser(content, rootUrlSum)
for sort_3 in firstPage.getBaseSort3UrlSums():
print sort_3.url
def testSort3Page():
fileName = os.path.join(testFilePath, '280-0-0-0-0.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
sort_3_urlsum = ObuyUrlSummary(url=r'http://www.coo8.com/products/280-0-0-0-0.html', parentPath=[('test')], catagoryLevel=3)
sort3Page = Coo8Sort3PageParser(content, sort_3_urlsum)
for sort_4 in sort3Page.getSort4PageUrlSums():
print sort_4.url
def testSort3Details():
fileName = os.path.join(testFilePath, '280-0-0-0-0.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
sort_3_urlsum = ObuyUrlSummary(url=r'http://www.coo8.com/products/353-0-0-0-0.html', parentPath=[('test')], catagoryLevel=3)
result = crawleRetries(sort_3_urlsum)
content = result.content
sort3Page = Coo8Sort3PageParser(content, sort_3_urlsum)
for prod in sort3Page.parserPageInfos():
print prod.logstr()
def testSortFinal():
urlsum = ObuyUrlSummary(url=r'http://www.coo8.com/product/159376.html', parentPath=[('test')], catagoryLevel=3)
result = crawleRetries(urlsum)
finalPage = Coo8SortFinalParser(result.content, urlsum)
print finalPage.parserPageInfos()
if __name__ == '__main__':
#testAllSortPage()
#testSort3Page()
#testSort3Details()
#testSortFinal()
s = '@'
print s.split('@')
| [
[
8,
0,
0.0326,
0.0326,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0558,
0.0047,
0,
0.66,
0.04,
764,
0,
1,
0,
0,
764,
0,
0
],
[
1,
0,
0.0605,
0.0047,
0,
0.66,
... | [
"'''\nCreated on 2011-8-7\n\n主要用于从网站上爬取信息后,抽取页面信息;\n\n@author: zhongfeng\n'''",
"from cStringIO import StringIO",
"from coo8.image_price import captcha_coo8",
"from crawlerhttp import crawleRetries",
"from pageparser import *",
"from threadpool import ThreadPool, WorkRequest",
"import json",
"import o... |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-8-7
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from cStringIO import StringIO
from coo8.image_price import captcha_coo8
from crawlerhttp import crawleRetries
from pageparser import *
from threadpool import ThreadPool, WorkRequest
import json
import os
import re
import threading
import urllib
from spiderconfigparser import SpiderConfig
coo8Root = ObuyUrlSummary(url=r'http://www.coo8.com/allcatalog/', name='coo8',
isRecursed=True, catagoryLevel=0)
class Coo8AllSortParser(RootCatagoryPageParser):
'''
从http://www.coo8.com/allcatalog/获取所有的分类信息,
组合成ObuyUrlSummary
'''
mainHost = r'http://www.coo8.com/'
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(Coo8AllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def getBaseSort3UrlSums(self):
finalUrlList = []
allSort = self.soup.find(name='div', attrs={'class':'cateItems'})
for t in allSort.findAll(name='div', attrs={'class':re.compile('hd.*')}):#一级分类
sort_1 = t.find(name='h2')
name = sort_1['id']
url = ''.join((self.mainHost,name,'/'))
sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=False)
sort_2 = t.findNextSibling(name='div',attrs={'class':re.compile('bd.*')})
for tt in sort_2(name='dl'):#二级分类
name = tt.dt.h3.getText()
url = ''.join((self.mainHost, sort_1_urlsum.name, name))
sort_2_urlsum = self.buildSort_N(url, name, sort_1_urlsum, isCrawle=False)
for ttt in tt(name='dd'):#三级分类
try:
name, url = ParserUtils.parserTag_A(ttt.a)
except Exception:
continue
sort_3_urlsum = self.buildSort_N(url, name, sort_2_urlsum,firstFinalPage=True)
finalUrlList.append(sort_3_urlsum)
return finalUrlList
class Coo8Sort3PageParser(Sort3PageParser):
'''
三级页面解析类
'''
pricePageNum = 8
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(Coo8Sort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def nextPageUrlPattern(self):
urlSegs = self.rootUrlSummary.url.rsplit('.', 1)
pageSeg = '-0-0-0-{}-0-101101'
return '%s%s.%s' % (urlSegs[0].replace('-0-0-0-0',''), pageSeg, urlSegs[1])
def getTotal(self):
regx = u'共([0-9]*)页'
p = re.compile(regx)
pageSeg = self.soup.find(name='div', attrs={'class':'pageInfo'})
if pageSeg is None:
return 1
pageNum = pageSeg.getText()
totalNum = int(p.search(pageNum).group(1))
if totalNum > SpiderConfig.getMaxPage():
totalNum = SpiderConfig.getMaxPage()
return totalNum
def getAdWords(self,prod,prodUrl):
extraIconSeg = prod.find(name ='p',attrs={'class':'text-tag-wrap'})
adWords = ''
if extraIconSeg:
extraMsg = extraIconSeg.getText()
if extraMsg.find(u'返现') != -1 or extraMsg.find(u'赠品') != -1 \
or extraMsg.find(u'返券') != -1 :
sort_5_urlsum = ObuyUrlSummary(url=prodUrl)
result = crawleRetries(urlSum = sort_5_urlsum)
parser = Coo8SortFinalParser(dataStr = result.content,rootUrlSummary=sort_5_urlsum)
adWords = parser.parserPageInfos()
return adWords
def parserPageInfos(self):
resultList = []
plist = self.soup.find(name='div', attrs={'class':'srchContent'})
if plist is None:
#raise Exception("Page Error")
return resultList
try:
pool = ThreadPool(self.pricePageNum)
for li in plist(name='li'):
pNameSeg = li.find(name='p', attrs={'class':'name'}).a
pName = pNameSeg['title']
imgUrlSeg = li.find(name='p',attrs={'class':'pic'}).img
imgUrl = ''
if imgUrlSeg:
imgUrl = imgUrlSeg['src']
pid = pNameSeg['href'].rsplit('/')[-1].split('.')[0]
url = pNameSeg['href']
if url and not url.startswith('http'):
url = ''.join((r'http://www.coo8.com',pNameSeg['href']))
adWords = self.getAdWords(prod = li,prodUrl=url)
priceImgUrl = li.find(name='p', attrs={'class':'price'}).img['src']
prodDetail = ProductDetails(fullUrl=url,productId=pid, adWords=adWords, name=pName,imageUrl=imgUrl)
prodDetail.catagory = self.rootUrlSummary
pimgUrlSumm = ObuyUrlSummary(url = priceImgUrl)
req = WorkRequest(getProductPrice, [pimgUrlSumm, prodDetail, resultList, pool,captcha_coo8], None,
callback=None)
pool.putRequest(req)
pool.wait()
except Exception,e:
raise e
finally:
pool.dismissWorkers(num_workers=self.pricePageNum)
return resultList
class Coo8Sort4PageParser(Coo8Sort3PageParser):
'''
分类四级页面为列表页面,只抽取Product信息
'''
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(Coo8Sort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def parserSubUrlSums(self):
pass
class Coo8SortFinalParser(Parser):
def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
super(Coo8SortFinalParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
def parserPageInfos(self):
crashCut = self.getCrashCut()
exGift = self.getCouponAndExGift()
return '@'.join((crashCut,exGift))
def getCrashCut(self):
crashCutSeg = self.soup.find(name = 'span',attrs={'class':'D-fanxian'})
crashCutText = ''
if crashCutSeg:
crashCutText = crashCutSeg.getText()
return crashCutText
def getCouponAndExGift(self):
adSeg = self.soup.find(name = 'dl',attrs = {'id':'zengpin'})
ret = ''
if adSeg:
ret = adSeg.getText()
return ret
parserDict = {0:Coo8AllSortParser, 3:Coo8Sort3PageParser, 4:Coo8Sort4PageParser}
''' test '''
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')
def testAllSortPage():
fileName = os.path.join(testFilePath, 'coo8_2011-11-07_21-02-49.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
rootUrlSum = ObuyUrlSummary(url=r'http://www.coo8.com/allcatalog/', name='coo8')
firstPage = Coo8AllSortParser(content, rootUrlSum)
for sort_3 in firstPage.getBaseSort3UrlSums():
print sort_3.url
def testSort3Page():
fileName = os.path.join(testFilePath, '280-0-0-0-0.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
sort_3_urlsum = ObuyUrlSummary(url=r'http://www.coo8.com/products/280-0-0-0-0.html', parentPath=[('test')], catagoryLevel=3)
sort3Page = Coo8Sort3PageParser(content, sort_3_urlsum)
for sort_4 in sort3Page.getSort4PageUrlSums():
print sort_4.url
def testSort3Details():
fileName = os.path.join(testFilePath, '280-0-0-0-0.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
sort_3_urlsum = ObuyUrlSummary(url=r'http://www.coo8.com/products/353-0-0-0-0.html', parentPath=[('test')], catagoryLevel=3)
result = crawleRetries(sort_3_urlsum)
content = result.content
sort3Page = Coo8Sort3PageParser(content, sort_3_urlsum)
for prod in sort3Page.parserPageInfos():
print prod.logstr()
def testSortFinal():
urlsum = ObuyUrlSummary(url=r'http://www.coo8.com/product/159376.html', parentPath=[('test')], catagoryLevel=3)
result = crawleRetries(urlsum)
finalPage = Coo8SortFinalParser(result.content, urlsum)
print finalPage.parserPageInfos()
if __name__ == '__main__':
#testAllSortPage()
#testSort3Page()
#testSort3Details()
#testSortFinal()
s = '@'
print s.split('@')
| [
[
8,
0,
0.0326,
0.0326,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0558,
0.0047,
0,
0.66,
0.04,
764,
0,
1,
0,
0,
764,
0,
0
],
[
1,
0,
0.0605,
0.0047,
0,
0.66,
... | [
"'''\nCreated on 2011-8-7\n\n主要用于从网站上爬取信息后,抽取页面信息;\n\n@author: zhongfeng\n'''",
"from cStringIO import StringIO",
"from coo8.image_price import captcha_coo8",
"from crawlerhttp import crawleRetries",
"from pageparser import *",
"from threadpool import ThreadPool, WorkRequest",
"import json",
"import o... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-1
@author: zhongfeng
'''
from coo8.coo8pageparser import parserDict,coo8Root
from spider import main
if __name__ == '__main__':
main(coo8Root,parserDict)
| [
[
8,
0,
0.4062,
0.375,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.6875,
0.0625,
0,
0.66,
0.3333,
430,
0,
2,
0,
0,
430,
0,
0
],
[
1,
0,
0.75,
0.0625,
0,
0.66,
... | [
"'''\nCreated on 2011-8-1\n\n@author: zhongfeng\n\n'''",
"from coo8.coo8pageparser import parserDict,coo8Root",
"from spider import main",
"if __name__ == '__main__':\n main(coo8Root,parserDict)",
" main(coo8Root,parserDict)"
] |
#/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-9-19
@author: zhongfeng
'''
import sys,os
from dbproc.basedbproc import *
def createSiteCat(urls, siteName, catKeyFunc=getCatKey, saveFlag=True):
    '''
    Mirror the site's category tree into the database.

    For each leaf UrlSummary in *urls*, the ancestors in ``parentPath``
    (levels 0-2) are looked up by URL and created when missing, then the
    leaf itself (level 3) is created with the accumulated ancestor-id path.

    :param urls: iterable of leaf-category UrlSummary objects (parentPath set)
    :param siteName: site name, resolved to a site id
    :param catKeyFunc: maps a category URL to its raw category key
    :param saveFlag: when False only report missing categories, write nothing

    Fixes vs. previous version: ``urlsum.ur`` typo (AttributeError on every
    newly saved leaf) and the log labels that printed "id0" for level-2/3 ids.
    '''
    siteId = getSiteIdByName(siteName)
    for urlsum in urls:
        parent = urlsum.parentPath
        path = []  # db ids of ancestors found/created so far, root first
        for pUrl in parent:
            rawCatId = catKeyFunc(pUrl.url)
            if pUrl.catagoryLevel == 0:
                id0 = getCatIdFromRawInfo(siteId, catUrl=pUrl.url)
                if id0 is None:
                    if saveFlag:
                        id0 = saveProdCat(rawCatId=rawCatId, siteId=siteId, parentId=0, url=pUrl.url,
                                          name=pUrl.name, parentPath=[0], level=pUrl.catagoryLevel)
                        print('new cat :id is id0 %s,name:%s,url:%s,level:%s' % (id0, pUrl.name, pUrl.url, pUrl.catagoryLevel))
                    else:
                        print('new cat :name:%s,url:%s,level:%s' % (pUrl.name, pUrl.url, pUrl.catagoryLevel))
                path.append(id0)
            elif pUrl.catagoryLevel == 1:
                id1 = getCatIdFromRawInfo(siteId, catUrl=pUrl.url)
                if id1 is None:
                    if saveFlag:
                        id1 = saveProdCat(rawCatId=rawCatId, siteId=siteId, parentId=id0, url=pUrl.url,
                                          name=pUrl.name, parentPath=[id0], level=pUrl.catagoryLevel)
                        print('new cat :id is id1 %s,name:%s,url:%s,level:%s' % (id1, pUrl.name, pUrl.url, pUrl.catagoryLevel))
                    else:
                        print('new cat :name:%s,url:%s,level:%s' % (pUrl.name, pUrl.url, pUrl.catagoryLevel))
                path.append(id1)
            elif pUrl.catagoryLevel == 2:
                id2 = getCatIdFromRawInfo(siteId, catUrl=pUrl.url)
                if id2 is None:
                    if saveFlag:
                        id2 = saveProdCat(rawCatId=rawCatId, siteId=siteId, parentId=id1, url=pUrl.url,
                                          name=pUrl.name, parentPath=[id0, id1], level=pUrl.catagoryLevel)
                        # fixed label: was "id is id0" for a level-2 id
                        print('new cat :id is id2 %s,name:%s,url:%s,level:%s' % (id2, pUrl.name, pUrl.url, pUrl.catagoryLevel))
                    else:
                        print('new cat :name:%s,url:%s,level:%s' % (pUrl.name, pUrl.url, pUrl.catagoryLevel))
                path.append(id2)
        # The leaf (level-3) category itself.
        rawCatId = catKeyFunc(urlsum.url)
        id3 = getCatIdFromRawInfo(siteId, catUrl=urlsum.url)
        if id3 is None:
            if saveFlag:
                id3 = saveProdCat(rawCatId=rawCatId, siteId=siteId, parentId=path[-1], url=urlsum.url,
                                  name=urlsum.name, parentPath=path, level=urlsum.catagoryLevel)
                # fixed: urlsum.ur -> urlsum.url (raised AttributeError); label id0 -> id3
                print('new cat :id is id3 %s,name:%s,url:%s,level:%s' % (id3, urlsum.name, urlsum.url, urlsum.catagoryLevel))
            else:
                print('new cat :name:%s,url:%s,level:%s' % (urlsum.name, urlsum.url, urlsum.catagoryLevel))
def getAllCatUrlSums(rootUrlSum, ParserClass, content = None,include = None,exclude = None):
    '''
    Return the category UrlSummary list produced by *ParserClass* for the
    site's root catalog page.

    :param rootUrlSum: UrlSummary of the root catalog page
    :param ParserClass: parser class instantiated as
                        ParserClass(content, rootUrlSum, include, exclude)
    :param content: optional pre-fetched page content; crawled when None
    :param include: optional filter passed through to the parser
    :param exclude: optional filter passed through to the parser
    '''
    if content is None:
        from crawlerhttp import crawle
        # Retry until HTTP 200.  NOTE(review): no retry cap or backoff --
        # this loops forever if the site never returns 200.
        while True:
            result = crawle(rootUrlSum)
            if result.code == 200:
                break
        content = result.content
    firstPage = ParserClass(content, rootUrlSum,include,exclude)
    urlSums = firstPage.parserSubUrlSums()
    return urlSums
def calEveryLevelCatNum(urlSums):
    '''Print per-level unique-category counts for a list of level-3 url summaries.

    Levels 0-2 are taken from each entry's parentPath (a level-2 parent is
    optional, so some sites only fill s0/s1); level 3 is the entry itself.
    Finally prints the count of distinct 16-char md5-key prefixes across all
    urls as a key-collision sanity check, then the four per-level counts.
    '''
    s0 = set()  # level-0 parent urls
    s1 = set()  # level-1 parent urls
    s2 = set()  # level-2 parent urls (may stay empty)
    s3 = set()  # level-3 urls (the entries themselves)
    for sort_3 in urlSums:
        print sort_3.name,sort_3.url
        # NOTE(review): site id 8 is hard-coded here while callers handle many
        # sites -- confirm this debug lookup is intentional.
        print seEncode(match55bigoCats(8,sort_3.name))
        parentPath = sort_3.parentPath
        s0.add(parentPath[0].url)
        s1.add(parentPath[1].url)
        if len(parentPath) > 2:
            s2.add(parentPath[2].url)
        s3.add(sort_3.url)
    sa = set()
    import itertools
    for t in itertools.chain(s0, s1, s2, s3):
        # first 16 chars of the md5-based key -- check for prefix collisions
        sa.add(str(getMd5Key(t))[0:16])
    print len(sa)
    print len(s0), len(s1), len(s2), len(s3)
def testSiteCat(rootUrlSum,ParserClass,content = None,updateDb = False):
    '''Parse a site's category tree, print per-level stats, and optionally
    persist the categories to the DB (only when updateDb is True).'''
    catUrlSums = getAllCatUrlSums(rootUrlSum, ParserClass, content)
    calEveryLevelCatNum(catUrlSums)
    if not updateDb:
        return
    createSiteCat(catUrlSums, rootUrlSum.name)
def preProcCats(rootUrlSum,ParserClass,content = None):
    '''Diff the site's live category tree against the DB and report new categories.

    For every level-3 category scraped from the site: skip it when it is
    already recorded (getCatIdFromRawInfo hit); otherwise try to match it
    against the in-house (55bigo) category candidates and print exact url /
    name matches.  Categories with no candidate are printed at the end as
    brand new; ambiguous ones are printed as "undecided" for manual review.
    '''
    urlSums = getAllCatUrlSums(rootUrlSum, ParserClass, content)# fetch the site's current full category list
    siteId = getSiteIdByName(rootUrlSum.name)
    noMatch = []    # categories with no in-house candidate at all
    unDecided = []  # categories with candidates needing manual review
    for sort_3 in urlSums:
        id3 = getCatIdFromRawInfo(siteId, catUrl=sort_3.url)
        if id3 is not None:
            continue  # already mapped in the DB -- nothing to do
        retArr = match55bigoCats(siteId,sort_3.name)
        if len(retArr) == 0:
            noMatch.append( '|'.join((sort_3.name,sort_3.url)))
            continue
        auRet = []
        for t in retArr:
            # candidate row layout: (id, site_id, self_cat_id, name, url, cat_base_id)
            id,site_id,self_cat_id,name,url ,cat_base_id = t
            flag = 0  # 1 == exact url match on the same site
            if site_id == siteId:
                if url == sort_3.url:
                    flag = 1
                    print '|'.join((sort_3.name,sort_3.url,name,str(id),str(flag),str(site_id),str(self_cat_id),str(cat_base_id)))
                    break
                elif name == sort_3.name:
                    print '|'.join((sort_3.name,sort_3.url,name,str(id),str(flag),str(site_id),str(self_cat_id),str(cat_base_id)))
                    break
            else:
                # candidate from another site -- record as an alternative
                auRet.append( '(%s,%s)' % (str(self_cat_id),name))
        if len(auRet) > 0:
            # NOTE(review): name/id/flag here leak from the *last* loop
            # iteration above -- looks intentional as debug output, but verify.
            unDecided.append( '|'.join((sort_3.name,sort_3.url,name,str(id),str(flag),str(site_id),
                                        str(self_cat_id),str(cat_base_id), seEncode(auRet))))
    for newCat in noMatch:
        print newCat
    for unDeCat in unDecided:
        print unDeCat
#
from pageparser import getParser
def __buildCatagory(parserDict,root,content = None):
    '''Resolve the level-0 parser for *root* and run category pre-processing.

    Bug fix: the *content* argument (pre-fetched page HTML, e.g. the local
    portal.html read by buildIcsonCat) was previously discarded -- preProcCats
    was always called with None, forcing a live re-crawl.  Forward it instead;
    passing content=None keeps the old crawl-first behavior.
    '''
    parserClass = getParser(0, parserDict)
    preProcCats(root, parserClass, content)
def build360BuyCat():
from j360buy.j360pageparser import parserDict,j360buyRoot
__buildCatagory(parserDict,j360buyRoot)
def buildEfeihuCat():
from efeihu.efeihupageparser import parserDict,efeihuRoot
__buildCatagory(parserDict,efeihuRoot)
def buildLusenCat():
from lusen.lusenpageparser import parserDict,lusenRoot
__buildCatagory(parserDict,lusenRoot)
def buildGomeCat():
from gome.gomepageparser import parserDict,gomeRoot
__buildCatagory(parserDict,gomeRoot)
def buildDangDangCat():
from dangdang.dangpageparser import parserDict,dangdangRoot
__buildCatagory(parserDict,dangdangRoot)
def buildNewEggCat():
from newegg.neweggpageparser import parserDict,newEggRoot
__buildCatagory(parserDict,newEggRoot)
def buildSuningCat():
from suning.suningparser import parserDict,sunningRoot
__buildCatagory(parserDict,sunningRoot)
def buildIcsonCat():
    '''Build icson categories from a locally saved portal.html next to the script.

    Bug fix: this function uses sys.argv but the module never imports sys
    (the file-level import block has no ``import sys``), which raised a
    NameError at call time.  Import it locally here.
    '''
    from icson.icsonpageparser import parserDict,icsonRoot
    import sys  # not in the module-level imports; needed for sys.argv below
    curPath = os.path.abspath(os.path.dirname(sys.argv[0]))
    fileName = os.path.join(curPath, 'portal.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    __buildCatagory(parserDict,icsonRoot,content)
def buildCoo8Cat():
from coo8.coo8pageparser import parserDict,coo8Root
#===========================================================================
# curPath = os.path.abspath(os.path.dirname(sys.argv[0]))
# fileName = os.path.join(curPath, 'coo8_allcat.htm')
# with open(fileName, 'r') as fInput:
# content = fInput.read()
#===========================================================================
__buildCatagory(parserDict,coo8Root)
def buildAmazonCat():
from amazon.amazonpageparser import rootUrlSummary,parserDict
from pageparser import ObuyUrlSummary
parserClass = getParser(0, parserDict)
include = [ObuyUrlSummary(name=name) for name in [u'home-appliances']]
exclude = [ObuyUrlSummary(name=name) for name in [u'video', u'aps', u'stripbooks', u'music', u'apparel', u'electronics', u'audio-visual-education']]
urlSumsSort1 = getAllCatUrlSums(rootUrlSum=rootUrlSummary,ParserClass=parserClass,exclude=exclude)
ret = []
for sort1 in urlSumsSort1:
print sort1.url
parserClass1 = getParser(1, parserDict)
urlSumsSort2 = getAllCatUrlSums(sort1, parserClass1)
ret.extend(urlSumsSort2)
calEveryLevelCatNum(ret)
createSiteCat(ret,rootUrlSummary.name)
def buildAmazonCat_New():
from amazon.amazonpageparser import rootUrlSummary,parserDict
from pageparser import ObuyUrlSummary
parserClass = getParser(0, parserDict)
include = [ObuyUrlSummary(name=name) for name in [u'appliances',u'communications',u'audio-visual',u'computers',u'office-products',
u'home-appliances',u'photo-video',u'music-players',u'automotive',u'software']]
urlSumsSort1 = getAllCatUrlSums(rootUrlSum=rootUrlSummary,ParserClass=parserClass,include=include)
ret = []
for sort1 in urlSumsSort1:
print sort1.name
parserClass1 = getParser(1, parserDict)
urlSumsSort2 = getAllCatUrlSums(sort1, parserClass1)
for sort2 in urlSumsSort2:
print ' %s' % sort2.name
parserClass2 = getParser(2, parserDict)
urlSumsSort3 = getAllCatUrlSums(sort2, parserClass2)
if not urlSumsSort3:
sort2.catagoryLevel = 3
ret.append(sort2)
else:
for sort3 in urlSumsSort3:
print ' %s' % sort3.name
ret.extend(urlSumsSort3)
calEveryLevelCatNum(ret)
createSiteCat(ret,rootUrlSummary.name)
def buildAllCat():
build360BuyCat()
buildGomeCat()
buildDangDangCat()
buildNewEggCat()
buildSuningCat()
buildIcsonCat()
buildCoo8Cat()
if __name__ == '__main__':
#from gome.gomepageparser import parserDict,gomeRoot
#parserClass = getParser(0, parserDict)
#urlSums = getAllCatUrlSums(rootUrlSum = gomeRoot, ParserClass=parserClass, content = None)
#createSiteCat(urls = urlSums, siteName = gomeRoot.name, saveFlag = False)
#build360BuyCat()
#buildGomeCat()
#buildSuningCat()
#buildAmazonCat_New()
#buildEfeihuCat()
#buildLusenCat()
buildCoo8Cat()
| [
[
8,
0,
0.0233,
0.0194,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0349,
0.0039,
0,
0.66,
0.0455,
509,
0,
2,
0,
0,
509,
0,
0
],
[
1,
0,
0.0388,
0.0039,
0,
0.66... | [
"'''\nCreated on 2011-9-19\n\n@author: zhongfeng\n'''",
"import sys,os",
"from dbproc.basedbproc import *",
"def createSiteCat(urls, siteName, catKeyFunc=getCatKey,saveFlag = True):\n siteId = getSiteIdByName(siteName)\n for urlsum in urls:\n parent = urlsum.parentPath\n path = []\n ... |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os,sys
import glob
from dbproc.basedbproc import getConnect,initClientEncode
#===============================================================================
# 表 `prod_catagory` db proc
#===============================================================================
def updateProdCat(conn,selfCatId,catBaseId,id):
    ''' Update one `prod_catagory` row (a site's level-3 category) with its
    mapped in-house category id and base-category id.  Uses a short-lived
    cursor from initClientEncode on the caller-owned connection. '''
    curs = initClientEncode(conn)
    sqlStr = '''UPDATE `prod_catagory` SET `self_cat_id` = %s,
                    `cat_base_id` = %s WHERE `prod_catagory`.`id` =%s '''
    param = (selfCatId,catBaseId,id)
    curs.execute(sqlStr, param)
    curs.close()
def seEncode(ustr, encoding='utf-8'):
    '''Coerce a value into a byte string suitable for DB storage.

    None      -> ''
    unicode   -> encoded with *encoding* (undecodable chars ignored)
    list/tuple/set -> comma-joined recursive encoding of each element
    anything else  -> str() of the value
    '''
    if ustr is None:
        return ''
    if isinstance(ustr, (list, tuple, set)):
        parts = [seEncode(item, encoding) for item in ustr]
        return ','.join(parts)
    if isinstance(ustr, unicode):
        return ustr.encode(encoding, 'ignore')
    return str(ustr)
def exportSiteProdCat(site_id,level=3):
    '''Fetch (name, url, LEVEL) rows for one site at the given category level.

    Opens and closes its own connection/cursor; returns the raw fetchall()
    result (a sequence of row tuples).  Defaults to the leaf level (3).
    '''
    conn = getConnect()
    curs = initClientEncode(conn)
    sqlStr = 'SELECT name, url, LEVEL FROM `prod_catagory` WHERE site_id =%s AND LEVEL =%s '
    param = (site_id,level)
    curs.execute(sqlStr,param)
    result = curs.fetchall()
    curs.close()
    conn.close()
    return result
def buildCatDbMap():# open issue: level-1 / level-2 categories are not yet matched to a mapping
    '''Import manual category mappings from "lusen.3C" (next to the script)
    into `prod_catagory`.

    Each non-empty '|'-separated line carries the prod_catagory row id in its
    second-to-last field and the in-house category id in its last field; a
    non-numeric last field means "unmapped" (both ids forced to 0).
    '''
    curPath = os.path.abspath(os.path.dirname(sys.argv[0]))
    catBaseId = 1
    conn = getConnect()
    for f in glob.glob1(curPath, 'lusen.3C'):
        print f
        for line in file(f):
            line = line.strip()
            if line == '':
                continue
            ret = line.split('|')
            try:
                # last field: in-house category id; non-numeric -> unmapped
                selfCatId = int(ret[-1])
            except ValueError:
                selfCatId = 0
                catBaseId = 0
            else:
                catBaseId = 1
            id = ret[-2]
            print selfCatId, catBaseId, id
            updateProdCat(conn, selfCatId, catBaseId, id)
    conn.close()
if __name__ == '__main__':
#buildCatDbMap()
f = file(r"c:\amazon.cat",'w')
for t in exportSiteProdCat(site_id=1):
s = seEncode(t)
f.write(s)
f.write(os.linesep)
f.close() | [
[
1,
0,
0.0533,
0.0133,
0,
0.66,
0,
688,
0,
2,
0,
0,
688,
0,
0
],
[
1,
0,
0.0667,
0.0133,
0,
0.66,
0.1429,
958,
0,
1,
0,
0,
958,
0,
0
],
[
1,
0,
0.08,
0.0133,
0,
0.... | [
"import os,sys",
"import glob",
"from dbproc.basedbproc import getConnect,initClientEncode",
"def updateProdCat(conn,selfCatId,catBaseId,id):\n ''' 更新各站点三级分类的对应的自有分类和所属大类别信息 '''\n curs = initClientEncode(conn)\n sqlStr = '''UPDATE `prod_catagory` SET `self_cat_id` = %s,\n ... |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os,sys
import glob
from dbproc.basedbproc import getConnect,initClientEncode
#===============================================================================
# 表 `prod_catagory` db proc
#===============================================================================
def updateProdCat(conn,selfCatId,catBaseId,id):
''' 更新各站点三级分类的对应的自有分类和所属大类别信息 '''
curs = initClientEncode(conn)
sqlStr = '''UPDATE `prod_catagory` SET `self_cat_id` = %s,
`cat_base_id` = %s WHERE `prod_catagory`.`id` =%s '''
param = (selfCatId,catBaseId,id)
curs.execute(sqlStr, param)
curs.close()
def seEncode(ustr, encoding='utf-8'):
'''负责把入数据库的字符串,转化成utf-8编码'''
if ustr is None:
return ''
if isinstance(ustr, unicode):
return ustr.encode(encoding, 'ignore')
elif isinstance(ustr, (list,tuple,set)):
return '%s' % ','.join([seEncode(s,encoding) for s in ustr])
return str(ustr)
def exportSiteProdCat(site_id,level=3):
conn = getConnect()
curs = initClientEncode(conn)
sqlStr = 'SELECT name, url, LEVEL FROM `prod_catagory` WHERE site_id =%s AND LEVEL =%s '
param = (site_id,level)
curs.execute(sqlStr,param)
result = curs.fetchall()
curs.close()
conn.close()
return result
def buildCatDbMap():#有一个问题没有解决,对应的level 1 与level 2没有对应上分类
curPath = os.path.abspath(os.path.dirname(sys.argv[0]))
catBaseId = 1
conn = getConnect()
for f in glob.glob1(curPath, 'lusen.3C'):
print f
for line in file(f):
line = line.strip()
if line == '':
continue
ret = line.split('|')
try:
selfCatId = int(ret[-1])
except ValueError:
selfCatId = 0
catBaseId = 0
else:
catBaseId = 1
id = ret[-2]
print selfCatId, catBaseId, id
updateProdCat(conn, selfCatId, catBaseId, id)
conn.close()
if __name__ == '__main__':
#buildCatDbMap()
f = file(r"c:\amazon.cat",'w')
for t in exportSiteProdCat(site_id=1):
s = seEncode(t)
f.write(s)
f.write(os.linesep)
f.close() | [
[
1,
0,
0.0533,
0.0133,
0,
0.66,
0,
688,
0,
2,
0,
0,
688,
0,
0
],
[
1,
0,
0.0667,
0.0133,
0,
0.66,
0.1429,
958,
0,
1,
0,
0,
958,
0,
0
],
[
1,
0,
0.08,
0.0133,
0,
0.... | [
"import os,sys",
"import glob",
"from dbproc.basedbproc import getConnect,initClientEncode",
"def updateProdCat(conn,selfCatId,catBaseId,id):\n ''' 更新各站点三级分类的对应的自有分类和所属大类别信息 '''\n curs = initClientEncode(conn)\n sqlStr = '''UPDATE `prod_catagory` SET `self_cat_id` = %s,\n ... |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-11-11
@author: zhongfeng
'''
from itertools import chain
import os
from dbproc.basedbproc import getCatBySiteIdAndLevel,getAllWebsiteBaseInfo,seEncode
def createSiteCat(site_id):
    '''Render one '|'-separated, gb18030-encoded line per level-3 category of a site.

    First builds an id -> name lookup over levels 0-2, then for each level-3
    row joins its parent-path (id, name) pairs with the row's own two leading
    columns and a line separator.  Embedded '|' chars are replaced by spaces
    so the field separator stays unambiguous.
    '''
    catDict = {}
    for level in xrange(0,3):
        catLevelRet = getCatBySiteIdAndLevel(site_id, level)
        catDict.update(((int(it[0]),it[1]) for it in catLevelRet))
    finalLevelRet = getCatBySiteIdAndLevel(site_id, 3)
    result = []
    for t in finalLevelRet:
        # NOTE(review): eval() on the DB-stored parentPath column (t[-1]) --
        # acceptable only for trusted data; ast.literal_eval would be safer.
        # Confirm the column always holds a plain list literal.
        tr = [seEncode(it, encoding='gb18030') for it in chain([(pId,catDict[int(pId)]) for pId in eval(t[-1])],(t[1],t[0],os.linesep))]
        result.append( '|'.join([s.replace('|',' ') for s in tr]))
    #result.append(os.linesep)
    return result
if __name__ == '__main__':
baseDir = r'c:\catinfo'
if not os.path.exists(baseDir):
os.makedirs(baseDir)
for en_name,id in getAllWebsiteBaseInfo():
print en_name,id
fName = os.path.join(baseDir,en_name)
result = createSiteCat(id)
with open(fName,'w') as output:
output.writelines(result)
| [
[
8,
0,
0.1714,
0.1429,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2571,
0.0286,
0,
0.66,
0.2,
808,
0,
1,
0,
0,
808,
0,
0
],
[
1,
0,
0.2857,
0.0286,
0,
0.66,
... | [
"'''\nCreated on 2011-11-11\n\n@author: zhongfeng\n'''",
"from itertools import chain",
"import os",
"from dbproc.basedbproc import getCatBySiteIdAndLevel,getAllWebsiteBaseInfo,seEncode",
"def createSiteCat(site_id):\n catDict = {}\n for level in xrange(0,3):\n catLevelRet = getCatBySiteIdAndLe... |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-11-11
@author: zhongfeng
'''
from itertools import chain
import os
from dbproc.basedbproc import getCatBySiteIdAndLevel,getAllWebsiteBaseInfo,seEncode
def createSiteCat(site_id):
catDict = {}
for level in xrange(0,3):
catLevelRet = getCatBySiteIdAndLevel(site_id, level)
catDict.update(((int(it[0]),it[1]) for it in catLevelRet))
finalLevelRet = getCatBySiteIdAndLevel(site_id, 3)
result = []
for t in finalLevelRet:
tr = [seEncode(it, encoding='gb18030') for it in chain([(pId,catDict[int(pId)]) for pId in eval(t[-1])],(t[1],t[0],os.linesep))]
result.append( '|'.join([s.replace('|',' ') for s in tr]))
#result.append(os.linesep)
return result
if __name__ == '__main__':
baseDir = r'c:\catinfo'
if not os.path.exists(baseDir):
os.makedirs(baseDir)
for en_name,id in getAllWebsiteBaseInfo():
print en_name,id
fName = os.path.join(baseDir,en_name)
result = createSiteCat(id)
with open(fName,'w') as output:
output.writelines(result)
| [
[
8,
0,
0.1714,
0.1429,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2571,
0.0286,
0,
0.66,
0.2,
808,
0,
1,
0,
0,
808,
0,
0
],
[
1,
0,
0.2857,
0.0286,
0,
0.66,
... | [
"'''\nCreated on 2011-11-11\n\n@author: zhongfeng\n'''",
"from itertools import chain",
"import os",
"from dbproc.basedbproc import getCatBySiteIdAndLevel,getAllWebsiteBaseInfo,seEncode",
"def createSiteCat(site_id):\n catDict = {}\n for level in xrange(0,3):\n catLevelRet = getCatBySiteIdAndLe... |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import ez_setup
import shutil, sys, os, glob
ez_setup.use_setuptools()
from setuptools import setup, find_packages
import py2exe
curpath = os.path.dirname(os.path.abspath(__file__))
def find_py_modules():
    '''Return the module names (file names without .py) of every top-level
    Python file under <this directory>/src.'''
    moduleNames = []
    for fullPath in glob.glob(os.path.join(curpath, r'src/*.py')):
        baseName = os.path.split(fullPath)[-1]
        moduleNames.append(os.path.splitext(baseName)[0])
    return moduleNames
def find_data_files(base_dir, files=('logging.conf', 'urls.cfg', 'spider.conf')):
    '''Return absolute paths under <curpath>/<base_dir> for each of *files*
    that actually exists on disk.

    Fix: the default was a mutable list shared across calls (classic Python
    pitfall); a tuple default is safe and backward-compatible -- callers may
    still pass their own list.
    '''
    data_files = []
    for fAbsPath in (os.path.join(curpath, base_dir, f) for f in files):
        if os.path.exists(fAbsPath):
            data_files.append(fAbsPath)
    return data_files
def singleSetUp(site_name):
    '''Run a py2exe setup() for one spider site, emitting dist/<site_name>.

    Removes any previous dist output first, then packages the site's
    src/<site>/<site>spider.py entry point as a console executable with
    bytecode optimization and compression enabled.
    '''
    dist_dir = 'dist/%s' % site_name
    if os.path.isdir(dist_dir): # remove the previous build output first
        print 'rm %s dist_dir first' % dist_dir
        shutil.rmtree(dist_dir)
    bDir = 'src/%s' % site_name
    setup(
          name = "%sspider" % site_name,
          cmdclass = {'py2exe': py2exe.build_exe.py2exe},
          version = '1.0',
          packages = find_packages(bDir),# include all packages under src
          package_dir = {'':'src'},   # tell distutils packages are under src
          py_modules = find_py_modules(),
          console = ['src/%s/%sspider.py' % (site_name,site_name)],
          zip_safe=True,
          #test_suite = "test.test_enum.suite",
          package_data={'': ["*.*"],},
          options={'py2exe': {'optimize': 2,
                       'compressed': True,
                       'dist_dir': dist_dir, } },
          #data_files = find_data_files(bDir),
          # installed or upgraded on the target machine
          install_requires=['chardet', 'enum', 'BeautifulSoup', 'threadpool'],
          # PyPI metadata
          # metadata for upload to PyPI
          author="zhongfeng",
          author_email="fzhong@travelsky.com",
          description="21obuys Package",
          license="PSF",
          keywords="crawlers",
          classifiers=[
                       "Development Status :: 4 - Beta",
                       "License :: OSI Approved :: GNU General Public License (GPL)",
                       "License :: OSI Approved :: Python Software Foundation License",
                       "Programming Language :: Python",
                       "Topic :: Software Development :: Libraries :: Python Modules",
                       "Operating System :: OS Independent",
                       "Intended Audience :: Developers",
                       ],
          )
class BuildSpiderExe(object):
    '''Drive py2exe packaging for every spider site, then remove build/.'''
    def __init__(self):
        # Full roster of spider sites; trim to package a single spider.
        self.sites = ['amazon', 'coo8', 'dangdang', 'gome', 'icson',
                      'j360buy', 'newegg', 'suning', 'efeihu', 'lusen']
    def run(self):
        for site_name in self.sites:
            singleSetUp(site_name)
        # clean up the intermediate build folder left behind by py2exe
        if os.path.isdir('build'):
            shutil.rmtree('build')
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.argv.append('py2exe')
BuildSpiderExe().run()
| [
[
1,
0,
0.0488,
0.0122,
0,
0.66,
0,
650,
0,
1,
0,
0,
650,
0,
0
],
[
1,
0,
0.061,
0.0122,
0,
0.66,
0.1,
614,
0,
4,
0,
0,
614,
0,
0
],
[
8,
0,
0.0854,
0.0122,
0,
0.66... | [
"import ez_setup",
"import shutil, sys, os, glob",
"ez_setup.use_setuptools()",
"from setuptools import setup, find_packages",
"import py2exe",
"curpath = os.path.dirname(os.path.abspath(__file__))",
"def find_py_modules():\n\n fileList = (os.path.split(full)[-1] for full in glob.glob(os.path.join(cu... |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import ez_setup
import shutil, sys, os, glob
ez_setup.use_setuptools()
from setuptools import setup, find_packages
import py2exe
curpath = os.path.dirname(os.path.abspath(__file__))
def find_py_modules():
fileList = (os.path.split(full)[-1] for full in glob.glob(os.path.join(curpath, r'src/*.py')))
return [os.path.splitext(fileName)[0] for fileName in fileList]
def find_data_files(base_dir, files=['logging.conf', 'urls.cfg', 'spider.conf']):
data_files = []
for fAbsPath in (os.path.join(curpath,base_dir ,f) for f in files):
if os.path.exists(fAbsPath):
data_files.append(fAbsPath)
return data_files
def singleSetUp(site_name):
dist_dir = 'dist/%s' % site_name
if os.path.isdir(dist_dir): # 删除上次的生成结果
print 'rm %s dist_dir first' % dist_dir
shutil.rmtree(dist_dir)
bDir = 'src/%s' % site_name
setup(
name = "%sspider" % site_name,
cmdclass = {'py2exe': py2exe.build_exe.py2exe},
version = '1.0',
packages = find_packages(bDir),# include all packages under src
package_dir = {'':'src'}, # tell distutils packages are under src
py_modules = find_py_modules(),
console = ['src/%s/%sspider.py' % (site_name,site_name)],
zip_safe=True,
#test_suite = "test.test_enum.suite",
package_data={'': ["*.*"],},
options={'py2exe': {'optimize': 2,
'compressed': True,
'dist_dir': dist_dir, } },
#data_files = find_data_files(bDir),
# installed or upgraded on the target machine
install_requires=['chardet', 'enum', 'BeautifulSoup', 'threadpool'],
# PyPI metadata
# metadata for upload to PyPI
author="zhongfeng",
author_email="fzhong@travelsky.com",
description="21obuys Package",
license="PSF",
keywords="crawlers",
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: GNU General Public License (GPL)",
"License :: OSI Approved :: Python Software Foundation License",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
"Operating System :: OS Independent",
"Intended Audience :: Developers",
],
)
class BuildSpiderExe(object):
def __init__(self):
self.sites=['amazon','coo8','dangdang','gome','icson','j360buy',
'newegg','suning','efeihu','lusen']
#self.sites = ['j360buy']
def run(self):
for site_name in self.sites:
singleSetUp(site_name)
if os.path.isdir('build'): # 清除build文件夹
shutil.rmtree('build')
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.argv.append('py2exe')
BuildSpiderExe().run()
| [
[
1,
0,
0.0488,
0.0122,
0,
0.66,
0,
650,
0,
1,
0,
0,
650,
0,
0
],
[
1,
0,
0.061,
0.0122,
0,
0.66,
0.1,
614,
0,
4,
0,
0,
614,
0,
0
],
[
8,
0,
0.0854,
0.0122,
0,
0.66... | [
"import ez_setup",
"import shutil, sys, os, glob",
"ez_setup.use_setuptools()",
"from setuptools import setup, find_packages",
"import py2exe",
"curpath = os.path.dirname(os.path.abspath(__file__))",
"def find_py_modules():\n\n fileList = (os.path.split(full)[-1] for full in glob.glob(os.path.join(cu... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-7-26
京东价格图片识别模块
@author: zhongfeng
'''
import ImageFilter, ImageChops
from captcha_price import *
from j360buy.j360_feature import __360buy_FEATURES_MAP__
import Image
import re
import time
try:
import psyco
psyco.full()
except ImportError:
pass
class CaptchaProfile_360Buy(CaptchaProfile):
    '''Captcha profile that decodes 360buy price images into digit strings.

    Implements the CaptchaProfile hooks: image preprocessing (filter),
    per-glyph cropping (split) and glyph-to-digit recognition (match).
    '''
    def __init__(self):
        pass
    def __new__(cls):
        '''
        Singleton: build the (relatively expensive) feature lookup table
        only once and reuse the same instance for every call.
        '''
        if '_inst' not in vars(cls):
            # precompute feature-data -> digit lookup for exact matching
            cls.__catagory_FEATURES_MAP__ = dict([(feature_to_data(key),value) for key,value in __360buy_FEATURES_MAP__.iteritems()])
            cls._inst = super(CaptchaProfile_360Buy, cls).__new__(cls)
        return cls._inst
    def filter(self, im):
        # edge-enhance, then grayscale, then 1-bit threshold
        return im.filter(ImageFilter.EDGE_ENHANCE_MORE).convert('L').convert('1')
    def split(self, im):
        # Per-digit crop boxes, keyed by overall image size -- the price
        # image width encodes how many glyphs (digits + separator) it holds.
        matrix = {(48,12) : [(15, 3, 21, 11), (23, 3, 25, 11),(27,3,33,11),(35,3,41,11)],
                  (52,12) : [(15, 3, 21, 11), (23, 3, 29, 11),(31,3,33,11),(35,3,41,11),(43,3,49,11)],
                  (65,12) : [(15, 3, 21, 11), (23, 3, 29, 11),(31,3,37,11),(39,3,41,11),(43,3,49,11),(51,3,57,11)],
                  (75,12) : [(15, 3, 21, 11), (23, 3, 29, 11),(31,3,37,11),(39,3,45,11),(47,3,49,11),(51,3,57,11),(59, 3, 65, 11)],
                  (80,12) : [(15, 3, 21, 11), (23, 3, 29, 11),(31,3,37,11),(39,3,45,11),(47,3,53,11),(55,3,57,11),(59, 3, 65, 11),(67,3,73,11)]
                  }
        return [im.crop(box) for box in matrix[im.size]]
    def match(self, im):
        # Fast path: exact lookup of the glyph's binary feature map.
        imageData = feature_to_data(CaptchaImageAlgorithm.GetBinaryMap(im))
        result = self.__catagory_FEATURES_MAP__.get(imageData,None)
        if result != None:
            return result
        # Slow path: print the unknown glyph (so it can be added to the
        # feature map) and fall back to the nearest known feature by
        # Levenshtein distance over the raw pixel data.
        print CaptchaImageAlgorithm.GetBinaryMap(im)
        source = im.getdata()
        algorithm = CaptchaAlgorithm()
        minimal = min(__360buy_FEATURES_MAP__, key=lambda feature:algorithm.LevenshteinDistance(source, feature_to_data(feature)))
        #print minimal
        return __360buy_FEATURES_MAP__[minimal]
def captcha_360buy(filename):
    '''Recognize a 360buy price image and return the decoded string;
    *filename* is passed straight to captcha() (path or file-like object).'''
    return captcha(filename, CaptchaProfile_360Buy())
def test():
print captcha_360buy(r'c:\gp359329,2.png')
if __name__ == '__main__':
im = Image.open(r'c:\1.png')
im2 = Image.open(r'c:\1.png')
diff = ImageChops.difference(im, im2)
im = im.filter(ImageFilter.EDGE_ENHANCE_MORE).convert('L').convert('1')
dt = im.getdata()
print im.size
it1 = im.crop((15, 3, 21, 11))
it2 = im.crop((23, 3, 29, 11))
it3 = im.crop((31, 3, 37, 11))
it4 = im.crop((39, 3, 45, 11))
it5 = im.crop((47, 3, 49, 11))
it6 = im.crop((51, 3, 57, 11))
it7 = im.crop((59, 3, 65, 11))
cia = CaptchaImageAlgorithm()
s7 = cia.GetBinaryMap(it1)
print s7
profile = CaptchaProfile_360Buy()
print '+++++++++++++++++++++++++++'
for t in range(100):
print captcha_360buy(r'c:\5.png')
| [
[
8,
0,
0.0769,
0.0769,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1319,
0.011,
0,
0.66,
0.0909,
739,
0,
2,
0,
0,
739,
0,
0
],
[
1,
0,
0.1429,
0.011,
0,
0.66,
... | [
"'''\nCreated on 2011-7-26\n\n京东价格图片识别模块\n\n@author: zhongfeng\n'''",
"import ImageFilter, ImageChops",
"from captcha_price import *",
"from j360buy.j360_feature import __360buy_FEATURES_MAP__",
"import Image",
"import re",
"import time",
"try:\n import psyco\n psyco.full()\nexcept ImportError:\... |
#/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-7-27
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from cStringIO import StringIO
from j360buy.image_price import captcha_360buy
from crawlerhttp import crawle
from pageparser import *
from threadpool import ThreadPool, WorkRequest
import json
import os
import re
import threading
import urllib
class J360buyAllSortParser(RootCatagoryPageParser):
    '''
    Parse http://www.360buy.com/allSort.aspx (the all-categories page) and
    assemble the category tree as ObuyUrlSummary objects.
    '''
    mainHost = r'http://www.360buy.com'
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(J360buyAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def getBaseSort3UrlSums(self):
        '''Walk the three nested category levels in div#allsort and return
        every level-3 url summary (levels 1-2 are built but not crawled).'''
        finalUrlList = []
        allSort = self.soup.find(name='div', attrs={'id':'allsort'})
        for t in allSort.findAll(name='div', attrs={'id':re.compile('JDS_[0-9]+')}):# level-1 category
            sort_1 = t.find(name='div', attrs={'class':'mt'})
            name, url = ParserUtils.parserTag_A(sort_1.h2.a)
            sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=False)
            sort_2 = t.find(name='div', attrs={'class':'mc'})
            for tt in sort_2(name='dl'):# level-2 category
                name, url = ParserUtils.parserTag_A(tt.dt.a)
                url = ''.join((self.mainHost, url))
                sort_2_urlsum = self.buildSort_N(url, name, sort_1_urlsum, isCrawle=False)
                for ttt in tt.dd(name='em'):# level-3 category
                    name, url = ParserUtils.parserTag_A(ttt.a)
                    # level-3 hrefs lack the leading '/', hence the extra separator
                    url = ''.join((self.mainHost, '/', url))
                    sort_3_urlsum = self.buildSort_N(url, name, sort_2_urlsum)
                    finalUrlList.append(sort_3_urlsum)
        return finalUrlList
class J360buySort3PageParser(Sort3PageParser):
    '''
    Parser for 360buy level-3 (product listing) pages: pagination pattern,
    total page count, ad words lookup, and per-product detail extraction
    (prices arrive as captcha images decoded via a small thread pool).
    '''
    # number of worker threads fetching/decoding price images concurrently
    pricePageNum = 8
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(J360buySort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def nextPageUrlPattern(self):
        '''Return the listing-page url template with a '{}' page-number slot.'''
        urlSegs = self.rootUrlSummary.url.rsplit('.', 1)
        pageSeg = '-0-0-0-0-0-0-0-1-1-{}'
        return '%s%s.%s' % (urlSegs[0], pageSeg, urlSegs[1])
    def getTotal(self):
        '''Read the total page count from the "current/total" pager span.'''
        pageSeg = self.soup.find(name='div', attrs={'id':'filter'}).find(attrs={'class':'pagin pagin-m fr'})
        totalPage = int(pageSeg.span.string.split('/')[-1])
        return totalPage
    def __getAdWords(self, plist):
        '''Fetch the per-product ad titles via the JdService JSONP endpoint.

        Returns the 'html' payload (a list of {'Wid':..,'AdTitle':..} dicts,
        judging by the caller), or [] when no JSON object is found.
        '''
        # NOTE(review): eval() of a regex-extracted script fragment from the
        # crawled page -- executes remote-controlled text; acceptable only for
        # this trusted-crawl context.
        adQueryDict = eval(re.compile(r'{.*}').search(str(plist.script)).group())
        baseUrl = 'http://www.360buy.com/JdService.aspx?callback=GetJdwsmentsCallback&action=GetJdwsment'
        url = '&'.join((baseUrl, urllib.urlencode(adQueryDict)))
        result = crawle(url)
        ct = re.compile(r'{.*}').search(result.content)
        if ct is None:
            return []
        jObj = json.loads(ct.group())
        return jObj['html']
    def parserPageInfos(self):
        '''Extract ProductDetails for every product on the page.

        Queues one price-image download per product on a thread pool; each
        worker decodes the captcha image into prodDetail.privPrice and
        appends the detail to the shared resultList.
        '''
        def getProductPrice(*req):
            # worker: req = (priceImgUrl, prodDetail, resultList, pool)
            priceImgUrl = req[0]
            result = crawle(priceImgUrl)
            proc_normal_result(req, result)
            print 'Get price:%s' % priceImgUrl
            return result
        def proc_normal_result(req, result):
            # on HTTP 200: decode the price captcha and record the product
            args = req
            if result.code == 200:
                prodDetail = args[1]
                resultList = args[2]
                prodDetail.privPrice = captcha_360buy(StringIO(result.content))
                resultList.append(prodDetail)
            else:
                print args[0]
        resultList = []
        plist = self.soup.find(name='div', attrs={'id':'plist'})
        if plist is None:
            raise Exception("Page Error")
            return resultList  # NOTE(review): unreachable after the raise above
        try:
            pool = ThreadPool(self.pricePageNum)
            # product id -> ad title lookup for this page
            pid_ad = dict([[int(wa['Wid']), wa['AdTitle']] for wa in self.__getAdWords(plist)])
            for li in plist(name='li', attrs={'sku':re.compile('[0-9]+')}):
                pid = int(li['sku'])
                pName = li.find(name='div', attrs={'class':'p-name'}).a.getText()
                priceImgUrl = li.find(name='div', attrs={'class':'p-price'}).img['src']
                adWords = pid_ad.get(pid, '')
                prodDetail = ProductDetails(productId=pid, name=pName, adWords=adWords)
                req = WorkRequest(getProductPrice, [priceImgUrl, prodDetail, resultList, pool], None,
                                  callback=None)
                pool.putRequest(req)
            pool.wait()
        except Exception,e:
            # NOTE(review): 'raise e' resets the traceback in Python 2; a bare
            # 'raise' would preserve it.
            raise e
        finally:
            pool.dismissWorkers(num_workers=self.pricePageNum)
        return resultList
class J360buySort4PageParser(J360buySort3PageParser):
    '''
    Level-4 category pages are plain product listings: reuse the level-3
    product extraction but yield no further sub-category urls.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(J360buySort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def parserSubUrlSums(self):
        # listings have no children -- deliberately returns None
        pass
''' test '''
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')
def test360BuyAllSortPage():
fileName = os.path.join(testFilePath, 'allSort.aspx')
with open(fileName, 'r') as fInput:
content = fInput.read()
rootUrlSum = ObuyUrlSummary(url=r'http://www.360buy.com/allSort.aspx', name='360buy')
firstPage = J360buyAllSortParser(content, rootUrlSum)
for sort_3 in firstPage.getBaseSort3UrlSums():
print '/'.join(sort_3.getSavePathL())
print sort_3.catagoryLevel
def testSort3Page():
fileName = os.path.join(testFilePath, '360buy_2011-08-15_12-26-01.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
sort_3_urlsum = ObuyUrlSummary(url=r'http://www.360buy.com/products/737-794-870.html', parentPath=[('test')], catagoryLevel=3)
sort3Page = J360buySort3PageParser(content, sort_3_urlsum)
for sort_4 in sort3Page.getSort4PageUrlSums():
print sort_4
def testSort3Details():
fileName = os.path.join(testFilePath, '360buy_2011-08-15_12-26-01.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
sort_3_urlsum = ObuyUrlSummary(url=r'http://www.360buy.com/products/737-794-870.html', parentPath=[('test')], catagoryLevel=3)
sort3Page = J360buySort3PageParser(content, sort_3_urlsum)
sort3Page.parserPageInfos()
if __name__ == '__main__':
#test360BuyAllSortPage()
#testSort3Page()
testSort3Details()
| [
[
8,
0,
0.0414,
0.0414,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.071,
0.0059,
0,
0.66,
0.05,
764,
0,
1,
0,
0,
764,
0,
0
],
[
1,
0,
0.0769,
0.0059,
0,
0.66,
... | [
"'''\nCreated on 2011-7-27\n\n主要用于从网站上爬取信息后,抽取页面信息;\n\n@author: zhongfeng\n'''",
"from cStringIO import StringIO",
"from j360buy.image_price import captcha_360buy",
"from crawlerhttp import crawle",
"from pageparser import *",
"from threadpool import ThreadPool, WorkRequest",
"import json",
"import os... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-16
@author: zhongfeng
'''
__360buy_FEATURES_MAP__ = {
'''
__
__
__
__
__
__
##
##
'''
:
'.',
'''
_####_
##__##
#___#_
##__##
#___#_
##__##
#___##
_###__
'''
:
'0',
'''
_####_
##__##
#___#_
##__##
#___#_
##__##
##__##
_###__
'''
:
'0',
'''
__##__
_#_#__
__##__
__##__
__#___
__##__
__#___
_####_
'''
:
'1',
'''
##__##
____#_
___##_
__#___
_##___
#_____
######
'''
:
'2',
'''
_####_
##__##
____#_
___##_
__#___
_##___
#_____
######
'''
:
'2',
'''
_####_
##__##
____#_
__##__
____##
____##
##__#_
_####_
'''
:
'3',
'''
___##_
__#_#_
_#_##_
#__#__
######
____#_
___##_
'''
:
'4',
'''
____#_
___##_
__#_#_
_#_##_
#__#__
######
____#_
___##_
'''
:
'4',
'''
_#####
_#____
_##___
_#_##_
____##
____#_
##__##
_###__
'''
:
'5',
'''
__###_
_##___
#_____
#####_
#___##
##__#_
#___##
_###__
'''
:
'6',
'''
######
____#_
___##_
___#__
__##__
__#___
_##___
_#____
'''
:
'7',
'''
_####_
##__##
#___#_
_###__
##__##
##__##
#___#_
_####_
'''
:
'8',
'''
_####_
##__##
#___#_
_###__
##__##
#___##
##__#_
_####_
'''
:
'8',
'''
_####_
##__##
#___#_
##__##
_##_##
____##
___#__
_###__
'''
:
'9',
'''
_####_
##__##
#___#_
##__##
_###_#
____##
___#__
_###__
'''
:
'9',
} | [
[
8,
0,
0.0282,
0.0235,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.5258,
0.9531,
0,
0.66,
1,
768,
0,
0,
0,
0,
0,
6,
0
]
] | [
"'''\nCreated on 2011-8-16\n\n@author: zhongfeng\n'''",
"__360buy_FEATURES_MAP__ = {\n '''\n __\n __\n __\n __\n __\n __"
] |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-7-26
京东价格图片识别模块
@author: zhongfeng
'''
import ImageFilter, ImageChops
from captcha_price import *
from j360buy.j360_feature import __360buy_FEATURES_MAP__
import Image
import re
import time
try:
import psyco
psyco.full()
except ImportError:
pass
class CaptchaProfile_360Buy(CaptchaProfile):
def __init__(self):
pass
def __new__(cls):
'''
单态实现,初始化一次
'''
if '_inst' not in vars(cls):
cls.__catagory_FEATURES_MAP__ = dict([(feature_to_data(key),value) for key,value in __360buy_FEATURES_MAP__.iteritems()])
cls._inst = super(CaptchaProfile_360Buy, cls).__new__(cls)
return cls._inst
def filter(self, im):
return im.filter(ImageFilter.EDGE_ENHANCE_MORE).convert('L').convert('1')
def split(self, im):
matrix = {(48,12) : [(15, 3, 21, 11), (23, 3, 25, 11),(27,3,33,11),(35,3,41,11)],
(52,12) : [(15, 3, 21, 11), (23, 3, 29, 11),(31,3,33,11),(35,3,41,11),(43,3,49,11)],
(65,12) : [(15, 3, 21, 11), (23, 3, 29, 11),(31,3,37,11),(39,3,41,11),(43,3,49,11),(51,3,57,11)],
(75,12) : [(15, 3, 21, 11), (23, 3, 29, 11),(31,3,37,11),(39,3,45,11),(47,3,49,11),(51,3,57,11),(59, 3, 65, 11)],
(80,12) : [(15, 3, 21, 11), (23, 3, 29, 11),(31,3,37,11),(39,3,45,11),(47,3,53,11),(55,3,57,11),(59, 3, 65, 11),(67,3,73,11)]
}
return [im.crop(box) for box in matrix[im.size]]
def match(self, im):
imageData = feature_to_data(CaptchaImageAlgorithm.GetBinaryMap(im))
result = self.__catagory_FEATURES_MAP__.get(imageData,None)
if result != None:
return result
print CaptchaImageAlgorithm.GetBinaryMap(im)
source = im.getdata()
algorithm = CaptchaAlgorithm()
minimal = min(__360buy_FEATURES_MAP__, key=lambda feature:algorithm.LevenshteinDistance(source, feature_to_data(feature)))
#print minimal
return __360buy_FEATURES_MAP__[minimal]
def captcha_360buy(filename):
return captcha(filename, CaptchaProfile_360Buy())
def test():
    # Ad-hoc manual check against a sample captcha saved locally.
    print captcha_360buy(r'c:\gp359329,2.png')
if __name__ == '__main__':
    # Developer scratch code: inspect a sample image, its per-glyph crop
    # boxes and the recognizer output.  Paths are local Windows paths.
    im = Image.open(r'c:\1.png')
    im2 = Image.open(r'c:\1.png')
    diff = ImageChops.difference(im, im2)  # sanity check: identical inputs
    im = im.filter(ImageFilter.EDGE_ENHANCE_MORE).convert('L').convert('1')
    dt = im.getdata()
    print im.size
    # The seven glyph boxes of a 75x12 price image (see split()).
    it1 = im.crop((15, 3, 21, 11))
    it2 = im.crop((23, 3, 29, 11))
    it3 = im.crop((31, 3, 37, 11))
    it4 = im.crop((39, 3, 45, 11))
    it5 = im.crop((47, 3, 49, 11))
    it6 = im.crop((51, 3, 57, 11))
    it7 = im.crop((59, 3, 65, 11))
    cia = CaptchaImageAlgorithm()
    s7 = cia.GetBinaryMap(it1)
    print s7
    profile = CaptchaProfile_360Buy()
    print '+++++++++++++++++++++++++++'
    # Repeat recognition to eyeball determinism/throughput.
    for t in range(100):
        print captcha_360buy(r'c:\5.png')
| [
[
8,
0,
0.0769,
0.0769,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.1319,
0.011,
0,
0.66,
0.0909,
739,
0,
2,
0,
0,
739,
0,
0
],
[
1,
0,
0.1429,
0.011,
0,
0.66,
... | [
"'''\nCreated on 2011-7-26\n\n京东价格图片识别模块\n\n@author: zhongfeng\n'''",
"import ImageFilter, ImageChops",
"from captcha_price import *",
"from j360buy.j360_feature import __360buy_FEATURES_MAP__",
"import Image",
"import re",
"import time",
"try:\n import psyco\n psyco.full()\nexcept ImportError:\... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-1
@author: zhongfeng
'''
from j360pageparser import J360buyAllSortParser,J360buySort3PageParser,J360buySort4PageParser
from pageparser import ObuyUrlSummary
from spider import ObuySpider
if __name__ == '__main__':
    # Map category level -> parser class used by the spider at that depth.
    parserDict = {0:J360buyAllSortParser,3:J360buySort3PageParser,4:J360buySort4PageParser}
    # Alternative entry points kept for ad-hoc runs; only j360buyRoot is used.
    sort3 = ObuyUrlSummary(url = r'http://www.360buy.com/products/737-964-795.html',name='360buy',
                           isRecursed = True,catagoryLevel = 3)
    digitRoot = ObuyUrlSummary(url = r'http://www.360buy.com/products/652-653-659-0-0-0-0-0-0-0-1-1-2.html',
                               name='digital',isRecursed = False,catagoryLevel = 4)
    # Crawl root: the full category listing page.
    j360buyRoot = ObuyUrlSummary(url = r'http://www.360buy.com/allSort.aspx',name='360buy',
                                 isRecursed = True,catagoryLevel = 0)
    pcare = ObuyUrlSummary(url = r'http://www.360buy.com/products/652-653-000.html',
                           name='手机',isRecursed = False,catagoryLevel = 2)
    pdigital = ObuyUrlSummary(url = r'http://www.360buy.com/digital.html',name='digital',catagoryLevel = 1)
    pelectronic = ObuyUrlSummary(url = r'http://www.360buy.com/electronic.html',name='electronic',catagoryLevel = 1)
    pcomputer = ObuyUrlSummary(url = r'http://www.360buy.com/computer.html',name='computer',catagoryLevel = 1)
    # Restrict the crawl to these level-1 categories.
    includes = [pelectronic,pdigital,pcomputer]
    spider = ObuySpider(rootUrlSummary = j360buyRoot,parserDict = parserDict,include = includes,exclude = None,threadNum = 5)
    spider.spide()
| [
[
8,
0,
0.1463,
0.122,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2439,
0.0244,
0,
0.66,
0.25,
566,
0,
3,
0,
0,
566,
0,
0
],
[
1,
0,
0.2683,
0.0244,
0,
0.66,
... | [
"'''\nCreated on 2011-8-1\n\n@author: zhongfeng\n'''",
"from j360pageparser import J360buyAllSortParser,J360buySort3PageParser,J360buySort4PageParser",
"from pageparser import ObuyUrlSummary",
"from spider import ObuySpider",
"if __name__ == '__main__':\n\n parserDict = {0:J360buyAllSortParser,3:J360buyS... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-16
@author: zhongfeng
'''
# Glyph feature table for the 360buy price captcha.  Each key is an ASCII
# bitmap of one glyph: '#' = black pixel, '_' = background.  All whitespace
# inside the literals is stripped by feature_to_data() before matching, so
# the layout below is purely cosmetic.  Several digits appear twice to
# cover rendering variants; two entries ('2' and '4') have only 7 rows.
__360buy_FEATURES_MAP__ = {
    '''
    __
    __
    __
    __
    __
    __
    ##
    ##
    '''
    :
    '.',
    '''
    _####_
    ##__##
    #___#_
    ##__##
    #___#_
    ##__##
    #___##
    _###__
    '''
    :
    '0',
    '''
    _####_
    ##__##
    #___#_
    ##__##
    #___#_
    ##__##
    ##__##
    _###__
    '''
    :
    '0',
    '''
    __##__
    _#_#__
    __##__
    __##__
    __#___
    __##__
    __#___
    _####_
    '''
    :
    '1',
    '''
    ##__##
    ____#_
    ___##_
    __#___
    _##___
    #_____
    ######
    '''
    :
    '2',
    '''
    _####_
    ##__##
    ____#_
    ___##_
    __#___
    _##___
    #_____
    ######
    '''
    :
    '2',
    '''
    _####_
    ##__##
    ____#_
    __##__
    ____##
    ____##
    ##__#_
    _####_
    '''
    :
    '3',
    '''
    ___##_
    __#_#_
    _#_##_
    #__#__
    ######
    ____#_
    ___##_
    '''
    :
    '4',
    '''
    ____#_
    ___##_
    __#_#_
    _#_##_
    #__#__
    ######
    ____#_
    ___##_
    '''
    :
    '4',
    '''
    _#####
    _#____
    _##___
    _#_##_
    ____##
    ____#_
    ##__##
    _###__
    '''
    :
    '5',
    '''
    __###_
    _##___
    #_____
    #####_
    #___##
    ##__#_
    #___##
    _###__
    '''
    :
    '6',
    '''
    ######
    ____#_
    ___##_
    ___#__
    __##__
    __#___
    _##___
    _#____
    '''
    :
    '7',
    '''
    _####_
    ##__##
    #___#_
    _###__
    ##__##
    ##__##
    #___#_
    _####_
    '''
    :
    '8',
    '''
    _####_
    ##__##
    #___#_
    _###__
    ##__##
    #___##
    ##__#_
    _####_
    '''
    :
    '8',
    '''
    _####_
    ##__##
    #___#_
    ##__##
    _##_##
    ____##
    ___#__
    _###__
    '''
    :
    '9',
    '''
    _####_
    ##__##
    #___#_
    ##__##
    _###_#
    ____##
    ___#__
    _###__
    '''
    :
    '9',
    }
[
8,
0,
0.0282,
0.0235,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
14,
0,
0.5258,
0.9531,
0,
0.66,
1,
768,
0,
0,
0,
0,
0,
6,
0
]
] | [
"'''\nCreated on 2011-8-16\n\n@author: zhongfeng\n'''",
"__360buy_FEATURES_MAP__ = {\n '''\n __\n __\n __\n __\n __\n __"
] |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-1
@author: zhongfeng
'''
from j360pageparser import J360buyAllSortParser,J360buySort3PageParser,J360buySort4PageParser
from pageparser import ObuyUrlSummary
from spider import ObuySpider
if __name__ == '__main__':
    # Map category level -> parser class used by the spider at that depth.
    parserDict = {0:J360buyAllSortParser,3:J360buySort3PageParser,4:J360buySort4PageParser}
    # Alternative entry points kept for ad-hoc runs; only j360buyRoot is used.
    sort3 = ObuyUrlSummary(url = r'http://www.360buy.com/products/737-964-795.html',name='360buy',
                           isRecursed = True,catagoryLevel = 3)
    digitRoot = ObuyUrlSummary(url = r'http://www.360buy.com/products/652-653-659-0-0-0-0-0-0-0-1-1-2.html',
                               name='digital',isRecursed = False,catagoryLevel = 4)
    # Crawl root: the full category listing page.
    j360buyRoot = ObuyUrlSummary(url = r'http://www.360buy.com/allSort.aspx',name='360buy',
                                 isRecursed = True,catagoryLevel = 0)
    pcare = ObuyUrlSummary(url = r'http://www.360buy.com/products/652-653-000.html',
                           name='手机',isRecursed = False,catagoryLevel = 2)
    pdigital = ObuyUrlSummary(url = r'http://www.360buy.com/digital.html',name='digital',catagoryLevel = 1)
    pelectronic = ObuyUrlSummary(url = r'http://www.360buy.com/electronic.html',name='electronic',catagoryLevel = 1)
    pcomputer = ObuyUrlSummary(url = r'http://www.360buy.com/computer.html',name='computer',catagoryLevel = 1)
    # Restrict the crawl to these level-1 categories.
    includes = [pelectronic,pdigital,pcomputer]
    spider = ObuySpider(rootUrlSummary = j360buyRoot,parserDict = parserDict,include = includes,exclude = None,threadNum = 5)
    spider.spide()
| [
[
8,
0,
0.1463,
0.122,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2439,
0.0244,
0,
0.66,
0.25,
566,
0,
3,
0,
0,
566,
0,
0
],
[
1,
0,
0.2683,
0.0244,
0,
0.66,
... | [
"'''\nCreated on 2011-8-1\n\n@author: zhongfeng\n'''",
"from j360pageparser import J360buyAllSortParser,J360buySort3PageParser,J360buySort4PageParser",
"from pageparser import ObuyUrlSummary",
"from spider import ObuySpider",
"if __name__ == '__main__':\n\n parserDict = {0:J360buyAllSortParser,3:J360buyS... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-7-26
京东价格图片识别模块
@author: zhongfeng
'''
import ImageFilter, ImageChops
import Image
import re
import time
try:
import psyco
psyco.full()
except ImportError:
pass
class CaptchaAlgorithm(object):
    '''Sequence metrics used when no exact glyph feature matches.'''
    def LevenshteinDistance(self, m, n):
        '''Return the edit distance between sequences *m* and *n*.

        Insertions, deletions and substitutions all cost 1; computed with
        a rolling previous/current row instead of the full matrix rows
        being appended in place.
        '''
        prev_row = list(range(len(n) + 1))
        for row_idx, m_item in enumerate(m):
            cur_row = [row_idx + 1]
            for col_idx, n_item in enumerate(n):
                substitute = prev_row[col_idx] + (0 if m_item == n_item else 1)
                cur_row.append(min(prev_row[col_idx + 1] + 1,
                                   cur_row[col_idx] + 1,
                                   substitute))
            prev_row = cur_row
        return prev_row[-1]
class CaptchaImageAlgorithm(object):
    '''Pixel-level helpers for binarized (1-bit) captcha images.

    All methods treat pixel value 0 as "black/ink" (as produced by
    convert('1')) and anything else as background.
    '''
    @staticmethod
    def GetPixelsXEdges(im):
        # Scan columns left-to-right and record the x positions where the
        # per-column black-pixel count crosses zero, i.e. glyph start/end.
        pixels = im.load()
        xsize, ysize = im.size
        state = -1  # -1: in background, 1: inside a glyph
        edges = []
        for x in xrange(xsize):
            weight = sum(1 if pixels[x, y] == 0 else 0 for y in xrange(ysize))
            level = 0
            if state == -1 and weight <= level:
                continue
            elif state == 1 and weight > level:
                continue
            else:
                state = -state
                edges.append(x)
        # Pair the transitions into (start, end) spans.
        # NOTE(review): if the image ends while still inside a glyph, edges
        # has odd length and the last pair raises IndexError - confirm
        # inputs always end on a background column.
        return [(edges[x], edges[x + 1]) for x in range(0, len(edges), 2)]
    @staticmethod
    def GetPixelsYEdges(im):
        # Same as GetPixelsXEdges but scanning rows top-to-bottom.
        pixels = im.load()
        xsize, ysize = im.size
        state = -1  # -1: in background, 1: inside a glyph
        edges = []
        for y in xrange(ysize):
            weight = sum(1 if pixels[x, y] == 0 else 0 for x in xrange(xsize))
            level = 0
            if state == -1 and weight <= level:
                continue
            elif state == 1 and weight > level:
                continue
            else:
                state = -state
                edges.append(y)
        return [(edges[x], edges[x + 1]) for x in range(0, len(edges), 2)]
    @staticmethod
    def StripYEdge(im):
        # Crop away the blank rows above the first and below the last ink row.
        yedges = CaptchaImageAlgorithm.GetPixelsYEdges(im)
        y1, y2 = yedges[0][0], yedges[-1][1]
        return im.crop((0, y1, im.size[0], y2))
    @staticmethod
    def GetBinaryMap(im):
        # Render the image as an ASCII bitmap, one text row per pixel row
        # ('#' = black, '_' = background) - the format used by the feature maps.
        xsize, ysize = im.size
        pixels = im.load()
        return '\n'.join(''.join('#' if pixels[x, y] == 0 else '_' for x in xrange(xsize)) for y in xrange(ysize))
    @staticmethod
    def getBitMapIn(im):
        # Flatten to a tuple of 0 (black) / 255 (background).
        # NOTE(review): iterates x-major (column by column), unlike
        # GetBinaryMap's row-major order - confirm this is intentional.
        xsize, ysize = im.size
        pixels = im.load()
        return tuple( 0 if pixels[x, y] == 0 else 255 for x in xrange(xsize) for y in xrange(ysize))
class CaptchaProfile(object):
    '''Abstract recognizer profile.

    Subclasses implement the three pipeline stages used by captcha():
    filter (binarize the image), split (crop one image per glyph) and
    match (map a glyph image to its character).
    '''
    def filter(self, im):
        # Was misspelled "fiter": captcha() calls profile.filter() and the
        # concrete profile overrides filter(), so the abstract hook must
        # use the same name.  Also raise NotImplementedError - raising the
        # NotImplemented constant is a TypeError, not the intended signal.
        raise NotImplementedError
    def split(self, im):
        raise NotImplementedError
    def match(self, im):
        raise NotImplementedError
def feature_to_data(feature):
    '''Convert an ASCII-art feature string into a flat pixel tuple.

    '#' becomes 0 (black pixel); every other character becomes 255.
    All whitespace is stripped first so the multi-line feature literals
    in the feature maps can be laid out freely.
    '''
    # A single \s substitution suffices: \s already matches \t, \r and \n
    # (the original ran two overlapping regex passes).
    feature = re.sub(r'\s', '', feature)
    return tuple(0 if x == '#' else 255 for x in feature)
def captcha(filename, profile):
    '''Recognize the text in the captcha image file *filename*.

    Pipeline: open -> profile.filter (binarize) -> profile.split
    (one cropped image per glyph) -> profile.match on each glyph,
    concatenated into the result string.
    '''
    im = Image.open(filename)
    im = profile.filter(im)
    # Dead commented-out timing instrumentation removed; use a profiler
    # if stage timings are needed again.
    return ''.join(profile.match(glyph) for glyph in profile.split(im))
| [
[
8,
0,
0.056,
0.056,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.096,
0.008,
0,
0.66,
0.1,
739,
0,
2,
0,
0,
739,
0,
0
],
[
1,
0,
0.104,
0.008,
0,
0.66,
0.2... | [
"'''\nCreated on 2011-7-26\n\n京东价格图片识别模块\n\n@author: zhongfeng\n'''",
"import ImageFilter, ImageChops",
"import Image",
"import re",
"import time",
"try:\n import psyco\n psyco.full()\nexcept ImportError:\n pass",
" import psyco",
" psyco.full()",
"class CaptchaAlgorithm(object):\n ... |
#/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-8-02
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from pageparser import *
class IcsonAllSortParser(RootCatagoryPageParser):
    '''
    Parse the Icson portal page (http://sz.icson.com/portal.html) for the
    full category tree and return the level-3 ObuyUrlSummary list; the
    level-1/2 summaries only form each leaf's parent path.
    '''
    mainHost = 'http://sz.icson.com/'
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(IcsonAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def getBaseSort3UrlSums(self):
        finalUrlList = []
        allSort = self.soup.find(attrs={'id':'protal_list'})
        for t in allSort.findAll(name='div',attrs={'class':'item_hd'}):  # level-1 category header
            name,url = ParserUtils.parserTag_A(t.find(name='a'))
            sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=False)
            sort_2 = t.findNextSibling(name='div',attrs={'class':'item_bd'})
            for tt in sort_2(name='dl'):  # level-2 category
                name = tt.dt.getText()
                # Level-2 entries carry no link; synthesise a URL from the name.
                url = ''.join((self.mainHost,name))
                sort_2_urlsum = self.buildSort_N(url, name, sort_1_urlsum, isCrawle=False)
                for ttt in tt.findAll(name='a'):  # level-3 category links
                    name, url = ParserUtils.parserTag_A(ttt)
                    sort_3_urlsum = self.buildSort_N(url, name, sort_2_urlsum)
                    finalUrlList.append(sort_3_urlsum)
        return finalUrlList
class IcsonSort3PageParser(Sort3PageParser):
    '''
    Level-3 category (product listing) page parser.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(IcsonSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def nextPageUrlPattern(self):
        # Paging URL template: drop the placeholder dashes from the base URL
        # and insert '-0-6-10-20-0-{}--' before the extension; '{}' is later
        # filled with the page number.
        urlSegs = self.rootUrlSummary.url.rsplit('.', 1)
        pageSeg = '-0-6-10-20-0-{}--'
        return '%s%s.%s' % (urlSegs[0].replace('--------',''), pageSeg, urlSegs[1])
    def getTotal(self):
        # Total page count is the number shown just before the "next page"
        # link; no such link means there is a single page.
        nextSeg = self.soup.find(name='a',attrs={'class':'page-next'})
        if nextSeg != None:
            t = nextSeg.findPreviousSibling(name='a').getText()
            return int(t)
        else:
            return 1
    def parserPageInfos(self):
        '''Extract one ProductDetails per product entry on the listing page.'''
        # NOTE(review): this finds a single <li class="item_list"> and then
        # iterates its children as products - confirm the container is not
        # meant to be the enclosing <ul>.
        plist = self.soup.find(name='li',attrs={'class':'item_list'})
        resultList = []
        for prod in plist:
            pNameSeg = prod.find(attrs={'class':'wrap_info'})
            pName,url = ParserUtils.parserTag_A(pNameSeg.a)
            # NOTE(review): adWords is the <p class="hot"> Tag object, not its
            # text - confirm downstream expects a Tag here.
            adWords = pNameSeg.find(name='p',attrs={'class':'hot'})
            # Product id is the trailing URL token, e.g. ...-12345.html
            pid = url.rsplit('-',1)[-1].split('.')[0]
            t = prod.find(attrs={'class':'price_icson'})
            if t != None:
                currentPrice = ParserUtils.getPrice(t.getText())
            else:
                currentPrice = 0.00  # price block missing on the page
            prodDetail = ProductDetails(productId=pid, privPrice = currentPrice,
                                        name=pName, adWords=adWords)
            resultList.append(prodDetail)
        return resultList
class IcsonSort4PageParser(IcsonSort3PageParser):
    '''
    Level-4 pages are plain listing pages: only product info is extracted.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(IcsonSort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def parserSubUrlSums(self):
        # Returning None stops the spider from descending below level 4.
        pass
''' test '''
import os
# Location of the saved HTML fixtures used by the test helpers below.
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')
def testIcsonAllSortPage():
    # Manual check: parse a saved category page and print the level-3 tree.
    fileName = os.path.join(testFilePath,'dangcat.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    rootUrlSum = ObuyUrlSummary(url=r'http://category.dangdang.com/', name='Icson')
    pserver = ObuyUrlSummary(url = r'http://category.dangdang.com/list?cat=4001976',
                             name='奶粉',catagoryLevel = 2)
    firstPage = IcsonAllSortParser(content, rootUrlSum,include = [pserver])
    for sort_3 in firstPage.parserSubUrlSums():
        for index, urlsum in enumerate(sort_3.parentPath):
            print urlsum.name
        print sort_3.name,sort_3.url ,sort_3.catagoryLevel
def testSort3Page():
    # Manual check: derive the level-4 paging URLs from a saved listing page.
    fileName = os.path.join(testFilePath,'dangdang_2011-08-19_13-03-03.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://category.dangdang.com/list?cat=4001011&store=eq0',
                                   parentPath=[('test')], catagoryLevel=3)
    sort3Page = IcsonSort3PageParser(content, sort_3_urlsum)
    for sort_4 in sort3Page.getSort4PageUrlSums():
        print sort_4.url
def testSort3Details():
    # Manual check: extract product details from a saved listing page.
    fileName = os.path.join(testFilePath,'dangdang_2011-08-19_13-03-03.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://category.dangdang.com/list?cat=4001011&store=eq0',
                                   parentPath=[('test')], catagoryLevel=3)
    sort3Page = IcsonSort3PageParser(content, sort_3_urlsum)
    for product in sort3Page.parserPageInfos():
        print product.logstr()
def testRegx():
    # Scratch: extract the page count ("共N页") from a saved gb18030 page.
    regx = u'共([0-9]*)页'
    p = re.compile(regx)
    fileName = os.path.join(testFilePath,'4001011.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    soup = BeautifulSoup(content)
    s = soup.find(name='span',attrs = {'id':'all_num'}).getText()
    content = content.decode('gb18030','ignore')
    print p.search(s).group(1)
if __name__ == '__main__':
    # Run the listing-page checks; the others are kept disabled.
    #testRegx()
    #testIcsonAllSortPage()
    testSort3Page()
    testSort3Details()
| [
[
8,
0,
0.0479,
0.0479,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.089,
0.0068,
0,
0.66,
0.0769,
488,
0,
1,
0,
0,
488,
0,
0
],
[
3,
0,
0.1849,
0.1712,
0,
0.66,... | [
"'''\nCreated on 2011-8-02\n\n主要用于从网站上爬取信息后,抽取页面信息;\n\n@author: zhongfeng\n'''",
"from pageparser import *",
"class IcsonAllSortParser(RootCatagoryPageParser):\n '''\n 从http://sz.icson.com/portal.html获取所有的分类信息,\n 组合成ObuyUrlSummary\n ''' \n mainHost = 'http://sz.icson.com/'\n ... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from icson.icsonpageparser import IcsonAllSortParser,IcsonSort3PageParser,IcsonSort4PageParser
from pageparser import ObuyUrlSummary
from spider import ObuySpider
if __name__ == '__main__':
    # Map category level -> parser class used by the spider at that depth.
    parserDict = {0:IcsonAllSortParser,3:IcsonSort3PageParser,4:IcsonSort4PageParser}
    # Alternative entry points kept for ad-hoc runs; only IcsonRoot is used.
    sort3 = ObuyUrlSummary(url = r'http://category.dangdang.com/list?cat=4002134&store=eq0',name='dangdang',
                           isRecursed = True,catagoryLevel = 3)
    digitRoot = ObuyUrlSummary(url = r'http://www.newegg.com.cn/SubCategory/1046-3.htm?pageSize=96',
                               name='digital',catagoryLevel = 4)
    #spider = ObuySpider(rootUrlSummary = sort3,parserDict = parserDict,include =None,exclude = None,threadNum = 10)
    #spider.spide()
    # Crawl root: the dangdang category index.
    IcsonRoot = ObuyUrlSummary(url = r'http://category.dangdang.com/',name='dangdang',
                               isRecursed = True,catagoryLevel = 0)
    pserver = ObuyUrlSummary(url = r'http://category.dangdang.com/list?cat=4001976',
                             name='奶粉',catagoryLevel = 2)
    spider = ObuySpider(rootUrlSummary = IcsonRoot,parserDict = parserDict,include =None,exclude = None,threadNum = 10)
    spider.spide()
[
8,
0,
0.1667,
0.1389,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2778,
0.0278,
0,
0.66,
0.25,
965,
0,
3,
0,
0,
965,
0,
0
],
[
1,
0,
0.3056,
0.0278,
0,
0.66,
... | [
"'''\nCreated on 2011-8-2\n\n@author: zhongfeng\n'''",
"from icson.icsonpageparser import IcsonAllSortParser,IcsonSort3PageParser,IcsonSort4PageParser",
"from pageparser import ObuyUrlSummary",
"from spider import ObuySpider",
"if __name__ == '__main__':\n\n parserDict = {0:IcsonAllSortParser,3:IcsonSort... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from icson.icsonpageparser import IcsonAllSortParser,IcsonSort3PageParser,IcsonSort4PageParser
from pageparser import ObuyUrlSummary
from spider import ObuySpider
if __name__ == '__main__':
    # Map category level -> parser class used by the spider at that depth.
    parserDict = {0:IcsonAllSortParser,3:IcsonSort3PageParser,4:IcsonSort4PageParser}
    # Alternative entry points kept for ad-hoc runs; only IcsonRoot is used.
    sort3 = ObuyUrlSummary(url = r'http://category.dangdang.com/list?cat=4002134&store=eq0',name='dangdang',
                           isRecursed = True,catagoryLevel = 3)
    digitRoot = ObuyUrlSummary(url = r'http://www.newegg.com.cn/SubCategory/1046-3.htm?pageSize=96',
                               name='digital',catagoryLevel = 4)
    #spider = ObuySpider(rootUrlSummary = sort3,parserDict = parserDict,include =None,exclude = None,threadNum = 10)
    #spider.spide()
    # Crawl root: the dangdang category index.
    IcsonRoot = ObuyUrlSummary(url = r'http://category.dangdang.com/',name='dangdang',
                               isRecursed = True,catagoryLevel = 0)
    pserver = ObuyUrlSummary(url = r'http://category.dangdang.com/list?cat=4001976',
                             name='奶粉',catagoryLevel = 2)
    spider = ObuySpider(rootUrlSummary = IcsonRoot,parserDict = parserDict,include =None,exclude = None,threadNum = 10)
    spider.spide()
[
8,
0,
0.1667,
0.1389,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2778,
0.0278,
0,
0.66,
0.25,
965,
0,
3,
0,
0,
965,
0,
0
],
[
1,
0,
0.3056,
0.0278,
0,
0.66,
... | [
"'''\nCreated on 2011-8-2\n\n@author: zhongfeng\n'''",
"from icson.icsonpageparser import IcsonAllSortParser,IcsonSort3PageParser,IcsonSort4PageParser",
"from pageparser import ObuyUrlSummary",
"from spider import ObuySpider",
"if __name__ == '__main__':\n\n parserDict = {0:IcsonAllSortParser,3:IcsonSort... |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# setup.py
# Part of 21obuys, a package providing enumerated types for Python.
#
# Copyright © 2007 Ben Finney
# This is free software; you may copy, modify and/or distribute this work
# under the terms of the GNU General Public License, version 2 or later
# or, at your option, the terms of the Python license.
import ez_setup
ez_setup.use_setuptools()
from setuptools import setup, find_packages
# Packaging metadata for the crawler suite.
# NOTE(review): name is "21obus" while the header comment says "21obuys" -
# looks like a typo; confirm the intended distribution name before release.
setup(
    name = "21obus",
    version = '1.0',
    scripts = ['src/360buyspider.py'],
    packages = find_packages('src'), # include all packages under src
    package_dir = {'':'src'},   # tell distutils packages are under src
    #py_modules = [main_module_name],
    # setuptools metadata
    zip_safe = True,
    #test_suite = "test.test_enum.suite",
    #package_data = {
    #    '': ["LICENSE.*"],
    #},
    # Project uses reStructuredText, so ensure that the docutils get
    # installed or upgraded on the target machine
    install_requires = ['chardet','enum','BeautifulSoup','threadpool'],
    # PyPI metadata
    # metadata for upload to PyPI
    author = "zhongfeng",
    author_email = "fzhong@travelsky.com",
    description = "21obuys Package",
    license = "PSF",
    keywords = "360buy newegg crawlers",
    classifiers = [
        "Development Status :: 4 - Beta",
        "License :: OSI Approved :: GNU General Public License (GPL)",
        "License :: OSI Approved :: Python Software Foundation License",
        "Programming Language :: Python",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Operating System :: OS Independent",
        "Intended Audience :: Developers",
    ],
)
| [
[
1,
0,
0.22,
0.02,
0,
0.66,
0,
650,
0,
1,
0,
0,
650,
0,
0
],
[
8,
0,
0.24,
0.02,
0,
0.66,
0.3333,
479,
3,
0,
0,
0,
0,
0,
1
],
[
1,
0,
0.28,
0.02,
0,
0.66,
0.66... | [
"import ez_setup",
"ez_setup.use_setuptools()",
"from setuptools import setup, find_packages",
"setup(\n name = \"21obus\",\n version = '1.0',\n scripts = ['src/360buyspider.py'],\n packages = find_packages('src'), # include all packages under src\n package_dir = {'':'src'}, # tell distutils... |
#/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import time
import random
from BeautifulSoup import BeautifulSoup
from crawlerhttp import UrlSummary, crawle
# Pages listing public HTTP proxies to harvest, and the target site each
# candidate proxy is validated against.
urlsProxy = ["http://proxy.ipcn.org/proxylist.html"]
#urlsProxy = ["http://www.proxycn.com/html_proxy/http-1.html"]
desSite = 'http://www.360buy.com'
class ChoiceProxy(object):
    '''Singleton cache of validated HTTP proxies.

    On first instantiation the proxy-list page is scraped, every ip:port
    found is probed against desSite, and proxies that respond HTTP 200
    within 10 seconds are kept for random selection.
    '''
    proxyList = []  # class-level cache of validated "ip:port" strings
    def __init__(self):
        pass
    def __new__(cls):
        # Singleton: scrape and validate the proxy list only once.
        if '_inst' not in vars(cls):
            cls.__initProxyList()
            cls._inst = super(ChoiceProxy, cls).__new__(cls)
        return cls._inst
    @classmethod
    def __initProxyList(cls):
        # Pull the proxy-list page and harvest every ip:port via regex.
        ipcnProxyPageResult = crawle(urlsProxy[0])
        if ipcnProxyPageResult.code == 200:
            #soup = BeautifulSoup(ipcnProxyPageResult.content)
            #proxyContents = soup.find('pre').contents[0]
            p = re.compile(r'(\d+\.\d+\.\d+\.\d+:[0-9]+)')
            for proxyIp in p.findall(ipcnProxyPageResult.content):
                if(cls.__testProxy(proxyIp)):
                    print proxyIp
                    cls.proxyList.append(proxyIp)
    @classmethod
    def __testProxy(cls, proxy):
        # Keep only proxies that fetch desSite successfully within 10s.
        proxyDicts = {'http':proxy}
        start = time.time()
        result = crawle(desSite, proxy = proxyDicts)
        end = time.time()
        estime = end - start
        print proxy, estime
        if result.code != 200 or estime > 10:
            return False
        return True
    @staticmethod
    def choice():
        # Returns None when no proxy validated - callers then see
        # {'http': None}; confirm downstream treats that as "no proxy".
        if len(ChoiceProxy.proxyList) == 0:
            return None
        return random.choice(ChoiceProxy.proxyList)
def choiceHttpProxy():
    # Proxy mapping in the form urllib expects; 'http' may map to None
    # when no working proxy is known (see ChoiceProxy.choice).
    return {'http':ChoiceProxy.choice()}
if __name__ == '__main__':
    # Smoke test: first call builds the singleton (scrape + validate),
    # then sample ten random proxies.
    for i in range(10):
        print ChoiceProxy().choice()
| [
[
1,
0,
0.0656,
0.0164,
0,
0.66,
0,
540,
0,
1,
0,
0,
540,
0,
0
],
[
1,
0,
0.082,
0.0164,
0,
0.66,
0.1111,
654,
0,
1,
0,
0,
654,
0,
0
],
[
1,
0,
0.0984,
0.0164,
0,
0... | [
"import re",
"import time",
"import random",
"from BeautifulSoup import BeautifulSoup",
"from crawlerhttp import UrlSummary, crawle",
"urlsProxy = [\"http://proxy.ipcn.org/proxylist.html\"]",
"desSite = 'http://www.360buy.com'",
"class ChoiceProxy(object):\n proxyList = []\n def __init__(self):\... |
#/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-7-27
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from pageparser import *
class SuningAllSortParser(RootCatagoryPageParser):
    '''
    Parse http://www.suning.com/ for the full category tree and return the
    level-3 ObuyUrlSummary list; the level-1/2 summaries only form each
    leaf's parent path.
    '''
    mainHost = r'http://www.suning.com'
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(SuningAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def getBaseSort3UrlSums(self):
        finalUrlList = []
        allSort = self.soup.find(attrs = {'id':'SNmenuNav'})
        for t in allSort.findAll(name = 'dl'):  # level-1 category
            t = t.dt
            name,url = ParserUtils.parserTag_A(t.a)
            url = ''.join((self.mainHost,url))
            sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=False)
            sort_2 = t.findNextSibling(name='dd').find(name='ul',attrs={'class':'sideleft'})
            for tt in sort_2(name='li'):  # level-2 category
                name = tt.b.getText().strip()
                # Level-2 entries carry no link; synthesise one from the name.
                url = '/'.join((self.mainHost,name))
                sort_2_urlsum = self.buildSort_N(url, name, sort_1_urlsum, isCrawle=False)
                for ttt in tt.div.findAll(name = 'a'):  # level-3 category
                    name, url = ParserUtils.parserTag_A(ttt)
                    url = ''.join((self.mainHost,url))
                    sort_3_urlsum = self.buildSort_N(url, name, sort_2_urlsum)
                    finalUrlList.append(sort_3_urlsum)
        return finalUrlList
class SuningSort3PageParser(Sort3PageParser):
    '''
    Level-3 (product listing) page parser.

    NOTE(review): the paging pattern, the 'p-name'/'p-price' selectors and
    the hard-coded gome.com.cn host below match the Gome parser verbatim -
    looks copy-pasted; confirm they are valid for Suning pages.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(SuningSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def nextPageUrlPattern(self):
        # Paging template: insert '-0-0-0-0-0-0-0-1-1-{}' ('{}' is later
        # filled with the page number) before the URL's file extension.
        urlSegs = self.rootUrlSummary.url.rsplit('.', 1)
        pageSeg = '-0-0-0-0-0-0-0-1-1-{}'
        return '%s%s.%s' % (urlSegs[0], pageSeg, urlSegs[1])
    def getTotal(self):
        # Total pages from the "current/total" indicator in the toolbar.
        pageSeg = self.soup.find(name='div',attrs={'id':'toolbar'}).find(attrs={'class':'thispage'}).getText()
        totalPage = int(pageSeg.split('/')[-1])
        return totalPage
    def parserPageInfos(self):
        '''Extract one ProductDetails per <li> in the product list.'''
        plist = self.soup.find(name='div', attrs={'id':'plist'}).find(name='ul')
        resultList = []
        for li in plist(name='li'):
            pName,url = ParserUtils.parserTag_A(li.find(name='div', attrs={'class':'p-name'}).a)
            # Product id is the trailing path token, e.g. .../12345.html
            pid = url.rsplit('/',1)[-1].split('.')[0]
            url = ''.join((r'http://www.gome.com.cn',url))
            price = ParserUtils.getPrice(li.find(name='div', attrs={'class':'p-price'}).getText())
            prodDetail = ProductDetails(productId=pid, privPrice = price,name=pName, adWords='')
            resultList.append(prodDetail)
        return resultList
class SuningSort4PageParser(SuningSort3PageParser):
    '''
    Level-4 pages are plain listing pages: only product info is extracted.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(SuningSort4PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def parserSubUrlSums(self):
        # Returning None stops the spider from descending below level 4.
        pass
''' test '''
import os
# Location of the saved HTML fixtures used by the test helpers below.
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')
def testAllSortPage():
    # Manual check: parse a saved category page and print the level-3 tree.
    fileName = os.path.join(testFilePath,'SuningAllsort.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    rootUrlSum = ObuyUrlSummary(url=r'http://www.Suning.com.cn/allSort.html', name='gome')
    firstPage = SuningAllSortParser(content, rootUrlSum)
    for sort_3 in firstPage.getBaseSort3UrlSums():
        for index, urlsum in enumerate(sort_3.parentPath):
            pass
        print sort_3.name,sort_3.url ,sort_3.catagoryLevel
def testSort3Page():
    # Manual check: derive the level-4 paging URLs from a saved listing page.
    fileName = os.path.join(testFilePath,'10000000-10000012-10000070.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://www.Suning.com.cn/products/10000000-10000012-10000070.html',
                                   parentPath=[('test')], catagoryLevel=3)
    sort3Page = SuningSort3PageParser(content, sort_3_urlsum)
    for sort_4 in sort3Page.getSort4PageUrlSums():
        print sort_4.url
def testSort3Details():
    # Manual check: extract product details from a saved listing page.
    fileName = os.path.join(testFilePath,'10000000-10000012-10000070.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
    sort_3_urlsum = ObuyUrlSummary(url=r'http://www.gome.com.cn/products/10000000-10000012-10000070.html',
                                   parentPath=[('test')], catagoryLevel=3)
    sort3Page = SuningSort3PageParser(content, sort_3_urlsum)
    for product in sort3Page.parserPageInfos():
        print product.logstr()
if __name__ == '__main__':
    import urllib
    # Scratch: decode a captured search URL for inspection.
    url = 'http://localhost/webapp/wcs/stores/servlet/odeSearch?storeId=10052&catalogId=10051&categoryId=20003&langId=-7&ip_state=c0%3ds%253A9%253Bcity_id%253B%253Aeq%253B1001B.s%253A9%253Bcity_id%253B%253Aeq%253B5006F.s%253A9%253Bcity_id%253B%253Aeq%253B5006Z.s%253A9%253Bcity_id%253B%253Ass%253B0000A%26c1%3ds%253A9%253Biphrase%2bbundle%2btaxonomy%2bid%2bfrom%2broot%253B%253Ass%253B%253A20003%26q%3d20%26a0%3diphrase%2bbundle%2btaxonomy%252F%252Fv%253A0%26i%3dsitemap%2bid%26qt%3d1313391335%26qid%3dq8GzGmE5P2Ss3%26vid%3dvSXajhCLXuWWu%26ioe%3dUTF-8%26s2%3dsitemap%2bid%252F%252F1%26qtid%3dn8GzGmE5P2Ss3%26s1%3dpublishTime%252F%252F0%26rid%3dr8OlldtbsEwdf%26s0%3drank%252F%252F0%26t%3d0%26m0%3diphrase%2bbundle%2bid%26mcmode%3dtest&suggestionWordList=&isCatalogSearch=1&isList=0&sortType=0&currentPage=1'
    print urllib.unquote(url)
    #testAllSortPage()
    #testSort3Page()
    #testSort3Details()
| [
[
8,
0,
0.0556,
0.0556,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0952,
0.0079,
0,
0.66,
0.0833,
488,
0,
1,
0,
0,
488,
0,
0
],
[
3,
0,
0.2183,
0.2222,
0,
0.66... | [
"'''\nCreated on 2011-7-27\n\n主要用于从网站上爬取信息后,抽取页面信息;\n\n@author: zhongfeng\n'''",
"from pageparser import *",
"class SuningAllSortParser(RootCatagoryPageParser):\n '''\n 从http://www.suning.com/获取所有的分类信息,\n 组合成ObuyUrlSummary\n ''' \n mainHost = r'http://www.suning.com' \n def _... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-7-26
京东价格图片识别模块
@author: zhongfeng
'''
import ImageFilter, ImageChops
import Image
import re
import time
try:
import psyco
psyco.full()
except ImportError:
pass
class CaptchaAlgorithm(object):
    '''Sequence metrics used when no exact glyph feature matches.'''
    def LevenshteinDistance(self, m, n):
        '''Return the edit distance between sequences *m* and *n*.

        Insertions, deletions and substitutions all cost 1; computed with
        a rolling previous/current row instead of the full matrix rows
        being appended in place.
        '''
        prev_row = list(range(len(n) + 1))
        for row_idx, m_item in enumerate(m):
            cur_row = [row_idx + 1]
            for col_idx, n_item in enumerate(n):
                substitute = prev_row[col_idx] + (0 if m_item == n_item else 1)
                cur_row.append(min(prev_row[col_idx + 1] + 1,
                                   cur_row[col_idx] + 1,
                                   substitute))
            prev_row = cur_row
        return prev_row[-1]
class CaptchaImageAlgorithm(object):
    '''Pixel-level helpers for binarized (1-bit) captcha images.

    All methods treat pixel value 0 as "black/ink" (as produced by
    convert('1')) and anything else as background.
    '''
    @staticmethod
    def GetPixelsXEdges(im):
        # Scan columns left-to-right and record the x positions where the
        # per-column black-pixel count crosses zero, i.e. glyph start/end.
        pixels = im.load()
        xsize, ysize = im.size
        state = -1  # -1: in background, 1: inside a glyph
        edges = []
        for x in xrange(xsize):
            weight = sum(1 if pixels[x, y] == 0 else 0 for y in xrange(ysize))
            level = 0
            if state == -1 and weight <= level:
                continue
            elif state == 1 and weight > level:
                continue
            else:
                state = -state
                edges.append(x)
        # Pair the transitions into (start, end) spans.
        # NOTE(review): if the image ends while still inside a glyph, edges
        # has odd length and the last pair raises IndexError - confirm
        # inputs always end on a background column.
        return [(edges[x], edges[x + 1]) for x in range(0, len(edges), 2)]
    @staticmethod
    def GetPixelsYEdges(im):
        # Same as GetPixelsXEdges but scanning rows top-to-bottom.
        pixels = im.load()
        xsize, ysize = im.size
        state = -1  # -1: in background, 1: inside a glyph
        edges = []
        for y in xrange(ysize):
            weight = sum(1 if pixels[x, y] == 0 else 0 for x in xrange(xsize))
            level = 0
            if state == -1 and weight <= level:
                continue
            elif state == 1 and weight > level:
                continue
            else:
                state = -state
                edges.append(y)
        return [(edges[x], edges[x + 1]) for x in range(0, len(edges), 2)]
    @staticmethod
    def StripYEdge(im):
        # Crop away the blank rows above the first and below the last ink row.
        yedges = CaptchaImageAlgorithm.GetPixelsYEdges(im)
        y1, y2 = yedges[0][0], yedges[-1][1]
        return im.crop((0, y1, im.size[0], y2))
    @staticmethod
    def GetBinaryMap(im):
        # Render the image as an ASCII bitmap, one text row per pixel row
        # ('#' = black, '_' = background) - the format used by the feature maps.
        xsize, ysize = im.size
        pixels = im.load()
        return '\n'.join(''.join('#' if pixels[x, y] == 0 else '_' for x in xrange(xsize)) for y in xrange(ysize))
    @staticmethod
    def getBitMapIn(im):
        # Flatten to a tuple of 0 (black) / 255 (background).
        # NOTE(review): iterates x-major (column by column), unlike
        # GetBinaryMap's row-major order - confirm this is intentional.
        xsize, ysize = im.size
        pixels = im.load()
        return tuple( 0 if pixels[x, y] == 0 else 255 for x in xrange(xsize) for y in xrange(ysize))
class CaptchaProfile(object):
    '''Abstract recognizer profile.

    Subclasses implement the three pipeline stages used by captcha():
    filter (binarize the image), split (crop one image per glyph) and
    match (map a glyph image to its character).
    '''
    def filter(self, im):
        # Was misspelled "fiter": captcha() calls profile.filter() and the
        # concrete profile overrides filter(), so the abstract hook must
        # use the same name.  Also raise NotImplementedError - raising the
        # NotImplemented constant is a TypeError, not the intended signal.
        raise NotImplementedError
    def split(self, im):
        raise NotImplementedError
    def match(self, im):
        raise NotImplementedError
def feature_to_data(feature):
    '''Convert an ASCII-art feature string into a flat pixel tuple.

    '#' becomes 0 (black pixel); every other character becomes 255.
    All whitespace is stripped first so the multi-line feature literals
    in the feature maps can be laid out freely.
    '''
    # A single \s substitution suffices: \s already matches \t, \r and \n
    # (the original ran two overlapping regex passes).
    feature = re.sub(r'\s', '', feature)
    return tuple(0 if x == '#' else 255 for x in feature)
def captcha(filename, profile):
    '''Recognize the text in the captcha image file *filename*.

    Pipeline: open -> profile.filter (binarize) -> profile.split
    (one cropped image per glyph) -> profile.match on each glyph,
    concatenated into the result string.
    '''
    im = Image.open(filename)
    im = profile.filter(im)
    # Dead commented-out timing instrumentation removed; use a profiler
    # if stage timings are needed again.
    return ''.join(profile.match(glyph) for glyph in profile.split(im))
| [
[
8,
0,
0.056,
0.056,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.096,
0.008,
0,
0.66,
0.1,
739,
0,
2,
0,
0,
739,
0,
0
],
[
1,
0,
0.104,
0.008,
0,
0.66,
0.2... | [
"'''\nCreated on 2011-7-26\n\n京东价格图片识别模块\n\n@author: zhongfeng\n'''",
"import ImageFilter, ImageChops",
"import Image",
"import re",
"import time",
"try:\n import psyco\n psyco.full()\nexcept ImportError:\n pass",
" import psyco",
" psyco.full()",
"class CaptchaAlgorithm(object):\n ... |
#/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-7-27
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from pageparser import *
class GomeAllSortParser(RootCatagoryPageParser):
    '''
    Parse http://www.gome.com.cn/allSort.html for the full category tree
    and return the level-3 ObuyUrlSummary list; the level-1/2 summaries
    only form each leaf's parent path.
    '''
    mainHost = r'http://www.gome.com.cn'
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(GomeAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def getBaseSort3UrlSums(self):
        finalUrlList = []
        allSort = self.soup.find(attrs = {'id':'allsort'})
        for t in allSort.findAll(name = 'div',attrs = {'class':'m'}):  # level-1 category
            name,url = ParserUtils.parserTag_A(t.find(attrs = {'class':'mt'}).h2.a)
            url = ''.join((self.mainHost,url))
            sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=False)
            sort_2 = t.find(attrs = {'class':'mc'})
            for tt in sort_2(name='dl'):  # level-2 category
                if tt.dt.a is not None:
                    # Level-2 entry that links directly to a listing page:
                    # promote it to a level-3 leaf.
                    # NOTE(review): reads tt.a after testing tt.dt.a -
                    # confirm the intended link is tt.dt.a.
                    name, url = ParserUtils.parserTag_A(tt.a)
                    sort_2_urlsum = self.buildSort_N(url, name, sort_1_urlsum, isCrawle=False)
                    sort_2_urlsum.catagoryLevel = 3
                    finalUrlList.append(sort_2_urlsum)
                    continue
                name = tt.dt.getText().strip()
                # No link on this level-2 entry; synthesise one from the name.
                url = '/'.join((self.mainHost,name))
                sort_2_urlsum = self.buildSort_N(url, name, sort_1_urlsum, isCrawle=False)
                for ttt in tt.dd.findAll(name = 'em'):  # level-3 category
                    name, url = ParserUtils.parserTag_A(ttt.a)
                    url = ''.join((self.mainHost,url))
                    sort_3_urlsum = self.buildSort_N(url, name, sort_2_urlsum)
                    finalUrlList.append(sort_3_urlsum)
        return finalUrlList
class GomeSort3PageParser(Sort3PageParser):
    '''
    Level-3 (product listing) page parser for Gome.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(GomeSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def nextPageUrlPattern(self):
        # Paging template: insert '-0-0-0-0-0-0-0-1-1-{}' ('{}' is later
        # filled with the page number) before the URL's file extension.
        urlSegs = self.rootUrlSummary.url.rsplit('.', 1)
        pageSeg = '-0-0-0-0-0-0-0-1-1-{}'
        return '%s%s.%s' % (urlSegs[0], pageSeg, urlSegs[1])
    def getTotal(self):
        # Total pages from the "current/total" indicator in the toolbar.
        pageSeg = self.soup.find(name='div',attrs={'id':'toolbar'}).find(attrs={'class':'thispage'}).getText()
        totalPage = int(pageSeg.split('/')[-1])
        return totalPage
    def parserPageInfos(self):
        '''Extract one ProductDetails per <li> in the product list.'''
        plist = self.soup.find(name='div', attrs={'id':'plist'}).find(name='ul')
        resultList = []
        for li in plist(name='li'):
            pName,url = ParserUtils.parserTag_A(li.find(name='div', attrs={'class':'p-name'}).a)
            # Product id is the trailing path token, e.g. .../12345.html
            pid = url.rsplit('/',1)[-1].split('.')[0]
            url = ''.join((r'http://www.gome.com.cn',url))
            price = ParserUtils.getPrice(li.find(name='div', attrs={'class':'p-price'}).getText())
            prodDetail = ProductDetails(productId=pid, privPrice = price,name=pName, adWords='')
            resultList.append(prodDetail)
        return resultList
class GomeSort4PageParser(GomeSort3PageParser):
    '''
    Level-4 category pages are plain product listings: reuse the level-3
    product extraction but stop the crawl from descending any further.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(GomeSort4PageParser, self).__init__(dataStr, rootUrlSummary,
                                                  include, exclude)
    def parserSubUrlSums(self):
        # Listing pages contribute no child categories to the crawl queue.
        return None
''' test '''
import os
# Directory of this module; test fixtures live under ./test_resources.
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir,'test_resources')
def testAllSortPage():
fileName = os.path.join(testFilePath,'gomeAllsort.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
rootUrlSum = ObuyUrlSummary(url=r'http://www.gome.com.cn/allSort.html', name='gome')
firstPage = GomeAllSortParser(content, rootUrlSum)
for sort_3 in firstPage.getBaseSort3UrlSums():
for index, urlsum in enumerate(sort_3.parentPath):
pass
print sort_3.name,sort_3.url ,sort_3.catagoryLevel
def testSort3Page():
fileName = os.path.join(testFilePath,'10000000-10000012-10000070.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
sort_3_urlsum = ObuyUrlSummary(url=r'http://www.gome.com.cn/products/10000000-10000012-10000070.html',
parentPath=[('test')], catagoryLevel=3)
sort3Page = GomeSort3PageParser(content, sort_3_urlsum)
for sort_4 in sort3Page.getSort4PageUrlSums():
print sort_4.url
def testSort3Details():
fileName = os.path.join(testFilePath,'10000000-10000012-10000070.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
sort_3_urlsum = ObuyUrlSummary(url=r'http://www.gome.com.cn/products/10000000-10000012-10000070.html',
parentPath=[('test')], catagoryLevel=3)
sort3Page = GomeSort3PageParser(content, sort_3_urlsum)
for product in sort3Page.parserPageInfos():
print product.logstr()
if __name__ == '__main__':
    # Manual smoke tests driven by saved HTML fixtures.
    testAllSortPage()
    testSort3Page()
    testSort3Details()
| [
[
8,
0,
0.0543,
0.0543,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.093,
0.0078,
0,
0.66,
0.0833,
488,
0,
1,
0,
0,
488,
0,
0
],
[
3,
0,
0.2326,
0.2558,
0,
0.66,... | [
"'''\nCreated on 2011-7-27\n\n主要用于从网站上爬取信息后,抽取页面信息;\n\n@author: zhongfeng\n'''",
"from pageparser import *",
"class GomeAllSortParser(RootCatagoryPageParser):\n '''\n 从http://www.gome.com.cn/allSort.html获取所有的分类信息,\n 组合成ObuyUrlSummary\n ''' \n mainHost = r'http://www.gome.com.cn'... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from gomepageparser import GomeAllSortParser,GomeSort3PageParser,GomeSort4PageParser
from pageparser import ObuyUrlSummary
from spider import ObuySpider
if __name__ == '__main__':
    # Category level -> parser class used by the spider at that depth.
    parserDict = {0:GomeAllSortParser,3:GomeSort3PageParser,4:GomeSort4PageParser}
    # NOTE(review): sort3 / digitRoot / pserver are alternative entry points
    # kept for manual experiments; only newEggRoot is crawled below.
    sort3 = ObuyUrlSummary(url = r'http://www.gome.com.cn/products/10000000-10000012-10000070.html',name='gome',
                           isRecursed = True,catagoryLevel = 3)
    digitRoot = ObuyUrlSummary(url = r'http://www.gome.com.cn/products/10000000-10000012-10000070-0-0-0-0-0-0-0-1-1-3.html',
                               name='手机',catagoryLevel = 4)
    newEggRoot = ObuyUrlSummary(url = r'http://www.gome.com.cn/allSort.html',name='gome',
                                isRecursed = True,catagoryLevel = 0)
    pserver = ObuyUrlSummary(url = r'http://www.gome.com.cn/tv.html',
                             name='电视',catagoryLevel = 1)
    # Crawl the full gome category tree with 5 worker threads.
    spider = ObuySpider(rootUrlSummary = newEggRoot,parserDict = parserDict,include = None,exclude = None,threadNum = 5)
    spider.spide()
[
8,
0,
0.1935,
0.1613,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.3226,
0.0323,
0,
0.66,
0.25,
507,
0,
3,
0,
0,
507,
0,
0
],
[
1,
0,
0.3548,
0.0323,
0,
0.66,
... | [
"'''\nCreated on 2011-8-2\n\n@author: zhongfeng\n'''",
"from gomepageparser import GomeAllSortParser,GomeSort3PageParser,GomeSort4PageParser",
"from pageparser import ObuyUrlSummary",
"from spider import ObuySpider",
"if __name__ == '__main__':\n\n parserDict = {0:GomeAllSortParser,3:GomeSort3PageParser,... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from gomepageparser import GomeAllSortParser,GomeSort3PageParser,GomeSort4PageParser
from pageparser import ObuyUrlSummary
from spider import ObuySpider
if __name__ == '__main__':
    # Category level -> parser class used by the spider at that depth.
    parserDict = {0:GomeAllSortParser,3:GomeSort3PageParser,4:GomeSort4PageParser}
    # NOTE(review): sort3 / digitRoot / pserver are alternative entry points
    # kept for manual experiments; only newEggRoot is crawled below.
    sort3 = ObuyUrlSummary(url = r'http://www.gome.com.cn/products/10000000-10000012-10000070.html',name='gome',
                           isRecursed = True,catagoryLevel = 3)
    digitRoot = ObuyUrlSummary(url = r'http://www.gome.com.cn/products/10000000-10000012-10000070-0-0-0-0-0-0-0-1-1-3.html',
                               name='手机',catagoryLevel = 4)
    newEggRoot = ObuyUrlSummary(url = r'http://www.gome.com.cn/allSort.html',name='gome',
                                isRecursed = True,catagoryLevel = 0)
    pserver = ObuyUrlSummary(url = r'http://www.gome.com.cn/tv.html',
                             name='电视',catagoryLevel = 1)
    # Crawl the full gome category tree with 5 worker threads.
    spider = ObuySpider(rootUrlSummary = newEggRoot,parserDict = parserDict,include = None,exclude = None,threadNum = 5)
    spider.spide()
[
8,
0,
0.1935,
0.1613,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.3226,
0.0323,
0,
0.66,
0.25,
507,
0,
3,
0,
0,
507,
0,
0
],
[
1,
0,
0.3548,
0.0323,
0,
0.66,
... | [
"'''\nCreated on 2011-8-2\n\n@author: zhongfeng\n'''",
"from gomepageparser import GomeAllSortParser,GomeSort3PageParser,GomeSort4PageParser",
"from pageparser import ObuyUrlSummary",
"from spider import ObuySpider",
"if __name__ == '__main__':\n\n parserDict = {0:GomeAllSortParser,3:GomeSort3PageParser,... |
#/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-7-27
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from pageparser import *
import re
from copy import deepcopy
class AmazonAllSortParser(RootCatagoryPageParser):
    '''
    Parses http://www.amazon.cn/gp/site-directory (the site directory)
    and builds an ObuyUrlSummary per category.
    '''
    mainHost = r'http://www.amazon.cn'
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def __getBaseSort2UrlSums(self):
        finalUrlList = []
        allSort = self.soup.find(attrs={"id":"siteDirectory"})
        for t in allSort.findAll(name='div', attrs={"class":"popover-grouping"}):# level-1 category
            name = t.find(name='div', attrs={"class":"popover-category-name"}).h2.getText()
            # NOTE(review): this is host + category *name*, not a real href;
            # the level-1 summary is only a non-crawled path placeholder —
            # confirm intended.
            url = ''.join((self.mainHost, name))
            sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=False)
            sort_2 = t.findNextSiblings(name='div')
            for tt in sort_2:# level-2 category
                name, url = ParserUtils.parserTag_A(tt.a)
                url = ''.join((self.mainHost,url))
                # Skip the aggregate "所有..." (all-items) links.
                if name.startswith(u'所有'):
                    continue
                sort_2_urlsum = self.buildSort_N(url, name, sort_1_urlsum, isCrawle=True)
                finalUrlList.append(sort_2_urlsum);
        return finalUrlList
    def parserSubUrlSums(self):
        result = self.__getBaseSort2UrlSums()
        return self.filterUrlList(result)
class AmazonSort2Parser(RootCatagoryPageParser):
    '''
    Extracts the next level of category links from an Amazon level-2
    category page, building an ObuyUrlSummary per link.
    '''
    mainHost = r'http://www.amazon.cn'
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonSort2Parser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def __isCat(self, catName):
        # True when the widget heading contains u'分类' ("category").
        return catName.find(u'分类') >= 0
    def getBaseSort3UrlSums(self):
        finalUrlList = []
        allSort3 = self.soup.findAll(name='div', attrs={"class":"unified_widget blurb"})
        for alls3 in allSort3:
            # NOTE(review): scanning stops at the first widget whose heading
            # mentions u'分类', so that widget itself is NOT parsed — verify
            # this is the intended cutoff and not an inverted condition.
            if self.__isCat(alls3.h2.getText()):
                break
            for t in alls3.findAll(name='div',attrs={'class':'title'}):
                name, url = ParserUtils.parserTag_A(t.a)
                url = ''.join((self.mainHost,url))
                sort_2_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=True)
                finalUrlList.append(sort_2_urlsum);
        return finalUrlList
class AmazonSort3PageParser(Sort3PageParser):
    '''
    Parser for Amazon level-3 (search result) listing pages.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonSort3PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def parserPageInfos(self):
        '''Return one ProductDetails per result_<n> div on the page.'''
        resultList = []
        for prod in self.soup.findAll(name='div',attrs={'id':re.compile(r'result_[0-9]+')}):
            pName, url = ParserUtils.parserTag_A(prod.find(name='div', attrs={'class':'title'}).a)
            # The product title doubles as the id on this page type.
            pid = pName
            currentPrice = ParserUtils.getPrice(prod.find(name='div',attrs={'class':'newPrice'}).span.getText())
            # The struck-through value is the pre-discount price; it is
            # absent when the item is not discounted.
            bypastSeg = prod.find(name='div',attrs={'class':'newPrice'}).strike
            pastPrice = '0.00'
            if bypastSeg != None:
                pastPrice = ParserUtils.getPrice(bypastSeg.getText())
            prodDetail = ProductDetails(productId=pid, privPrice=currentPrice, pubPrice=pastPrice,
                                        name=pName, adWords='')
            resultList.append(prodDetail)
        return resultList
    def __getNextPageUrl(self):
        '''Build the next-page URL from the #pagnNextLink anchor, or None.'''
        nextPageSeg = self.soup.find(attrs={'id':'pagnNextLink'})
        fullUrl = None
        if nextPageSeg != None:
            name,url = ParserUtils.parserTag_A(nextPageSeg)
            # BUGFIX: dropped a leftover debug `print url` statement.
            # Turn the ajax path into a '#' fragment appended to the base URL.
            url = url.replace(r'/gp/search','#')
            baseUrl = self.rootUrlSummary.url.rsplit('#')[0]
            fullUrl = ''.join((baseUrl,url))
        return fullUrl
    def parserSubUrlSums(self):
        '''Return the next page as a one-element list, or [] at the end.'''
        result = self.__getNextPageUrl()
        if result is None:
            return []
        else:
            urlSum = deepcopy(self.rootUrlSummary)
            urlSum.url = result
            return [urlSum]
''' test '''
import os
# Directory of this module; test fixtures live under ./test_resources.
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')
def testAllSortPage():
    '''Smoke-test AmazonAllSortParser against the saved site-directory fixture.'''
    fileName = os.path.join(testFilePath, 'amazonSite.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
        rootUrlSum = ObuyUrlSummary(url=r'http://www.amazon.cn/gp/site-directory/ref=topnav_sad', name='Amazon')
        # NOTE(review): `include` is never passed below (include=None) and its
        # URL has a doubled scheme ('http://http://') — leftover experiment.
        include = [ ObuyUrlSummary(url=r'http://http://www.newegg.com.cn/Category/536.htm',
                               name='服务器', catagoryLevel=2)]
        firstPage = AmazonAllSortParser(content, rootUrlSum, include=None)
        for sort_2 in firstPage.parserSubUrlSums():
            #for index, urlsum in enumerate(sort_3.parentPath):
                #print '\t' * index, str(urlsum.getUrlSumAbstract())
            print sort_2.url , sort_2.catagoryLevel
def testSort2Page():
fileName = os.path.join(testFilePath, '888465051.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
sort_2_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/%E7%94%B5%E8%84%91%E5%8F%8A%E9%85%8D%E4%BB%B6/b/ref=sd_allcat_pc?ie=UTF8&node=888465051',
parentPath=[('test')], catagoryLevel=2)
sort3Page = AmazonSort2Parser(content, sort_2_urlsum)
for sort_3 in sort3Page.parserSubUrlSums():
print sort_3.url
def testSort3Page():
    '''Print next-page URL summaries parsed from the saved search-result fixture.'''
    fileName = os.path.join(testFilePath, 'computer.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
        # The '#' fragment carries the ajax next-page state for Amazon search.
        sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
                               parentPath=[('test')], catagoryLevel=3)
        sort3Page = AmazonSort3PageParser(content, sort_3_urlsum)
        for sort_3 in sort3Page.parserSubUrlSums():
            print sort_3.url
def testSort3Details():
    '''Print the product details extracted from the saved search-result fixture.'''
    fileName = os.path.join(testFilePath, 'computer.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
        sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
                               parentPath=[('test')], catagoryLevel=3)
        sort3Page = AmazonSort3PageParser(content, sort_3_urlsum)
        for product in sort3Page.parserPageInfos():
            print product.logstr()
def testComment():
from BeautifulSoup import BeautifulSoup, Comment
fileName = os.path.join(testFilePath, 'computer.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
soup = BeautifulSoup(content)
comments = soup.findAll(text=lambda text:isinstance(text, Comment))
for comment in comments:
print comment.extract()
def testJson():
    '''Exercise AmazonSort3PageParser on the '&&&'-separated ajax JSON fixture.'''
    import json
    fileName = os.path.join(testFilePath, 'watch_json.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
        # Amazon's ajax reply is several JSON documents joined by '&&&';
        # collapse whitespace and drop empty segments before decoding.
        segList = content.split('&&&')
        segList = [' '.join(seg.split()).strip() for seg in segList]
        segList = filter(lambda seg:seg != '',segList)
        jSonObjs = [json.loads(seg) for seg in segList ]
        for jsonObj in jSonObjs:
            if jsonObj.has_key('results-btf'):
                # Below-the-fold results: plain HTML fragment.
                print '+++++++++++++++++'
                jsonRet = jsonObj['results-btf']['data']['value']
                sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
                                       parentPath=[('test')], catagoryLevel=3)
                sort3Page = AmazonSort3PageParser(jsonRet, sort_3_urlsum)
                for product in sort3Page.parserPageInfos():
                    print product.logstr()
            elif jsonObj.has_key('results-atf-next'):
                # Above-the-fold results arrive wrapped in an HTML comment;
                # extract the comment text before parsing it as HTML.
                print '--------------'
                jsonRet = jsonObj['results-atf-next']['data']['value']
                from BeautifulSoup import BeautifulSoup, Comment
                soup = BeautifulSoup(jsonRet)
                comment = soup.find(attrs={'id':'results-atf-next'},text=lambda text:isinstance(text, Comment))
                sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
                                       parentPath=[('test')], catagoryLevel=3)
                sort3Page = AmazonSort3PageParser(comment.extract(), sort_3_urlsum)
                for product in sort3Page.parserPageInfos():
                    print product.logstr()
if __name__ == '__main__':
    # Manual smoke tests; enable one at a time against saved fixtures.
    #testAllSortPage()
    #testSort2Page()
    #testSort3Page()
    #testSort3Details()
    #testComment()
    testJson()
| [
[
8,
0,
0.033,
0.033,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0566,
0.0047,
0,
0.66,
0.0588,
488,
0,
1,
0,
0,
488,
0,
0
],
[
1,
0,
0.0613,
0.0047,
0,
0.66,
... | [
"'''\nCreated on 2011-7-27\n\n主要用于从网站上爬取信息后,抽取页面信息;\n\n@author: zhongfeng\n'''",
"from pageparser import *",
"import re",
"from copy import deepcopy",
"class AmazonAllSortParser(RootCatagoryPageParser):\n '''\n 从http://www.amazon.cn/gp/site-directory获取所有的分类信息,\n 组合成ObuyUrlSummary\n ... |
#/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2011-7-27
主要用于从网站上爬取信息后,抽取页面信息;
@author: zhongfeng
'''
from BeautifulSoup import BeautifulSoup, Comment
from copy import deepcopy
from pageparser import *
import itertools
import json
import os
import re
import urllib
import urlparse
class AmazonAllSortParser(RootCatagoryPageParser):
    '''
    Builds an ObuyUrlSummary per top-level Amazon.cn department, derived
    from the search dropdown on the home page.
    '''
    mainHost = r'http://www.amazon.cn'
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonAllSortParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def __getBaseSort1UrlSums(self):
        finalUrlList = []
        # Each <option> of the search dropdown is a department search alias.
        allSort = self.soup.find(name='select',attrs={"id":"searchDropdownBox"})
        base_url = r'http://www.amazon.cn/s/ref=nb_sb_noss?__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&url={}&field-keywords=&x=20&y=15'
        for t in allSort.findAll(name='option'):# level-1 category
            searchAias = t['value']
            name = searchAias.split('=')[-1]
            # 'aps' is the all-departments pseudo-alias — presumably skipped
            # because it is not a real category; confirm.
            if name == 'aps':
                continue
            url = base_url.format(urllib.quote(searchAias))
            sort_1_urlsum = self.buildSort_N(url, name, self.rootUrlSummary)
            finalUrlList.append(sort_1_urlsum);
        return finalUrlList
    def parserSubUrlSums(self):
        result = self.__getBaseSort1UrlSums()
        return self.filterUrlList(result)
class AmazonSort1Parser(RootCatagoryPageParser):
    '''
    Extracts all level-2 category links from a level-1 department page,
    building an ObuyUrlSummary per link.
    '''
    mainHost = r'http://www.amazon.cn'
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonSort1Parser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def __isCat(self, catName):
        # True when the refinement heading contains u'类别' ("category").
        return catName.find(u'类别') >= 0
    def __getBaseSort2UrlSums(self):
        finalUrlList = []
        sort2 = self.soup.find(name='div', attrs={"id":"refinements"})
        #refId = 'ref_%s' % urllib.unquote(sort2['data-browseladder']).split(':')[-1]
        #allSort2Seg = sort2.find(name='ul',attrs={'id':refId})
        # Scan the refinement headings until the "category" section is found,
        # then collect the links from the <ul> that follows it.
        for catSeg in sort2(name='h2'):
            if self.__isCat(catSeg.getText().strip()):
                break
        allSort2Seg = catSeg.findNextSibling(name='ul')
        for t in allSort2Seg.findAll(name='a'):
            name, url = ParserUtils.parserTag_A(t)
            url = ''.join((self.mainHost,url))
            sort_2_urlsum = self.buildSort_N(url, name, self.rootUrlSummary, isCrawle=True)
            finalUrlList.append(sort_2_urlsum);
        return finalUrlList
    def parserSubUrlSums(self):
        result = self.__getBaseSort2UrlSums()
        return self.filterUrlList(result)
class AmazonSort2PageParser(Sort3PageParser):
    '''
    Parser for Amazon level-2 listing pages: extracts ProductDetails from
    both the visible results and the comment-wrapped ATF fragment, and
    derives the ajax next-page URL.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonSort2PageParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
    def parseProduct(self, prod):
        '''Build one ProductDetails from a result_<n> div; None when it has no title.'''
        titleSeg = prod.find(name='div', attrs={'class':'title'})
        if titleSeg is None:
            return
        pName, url = ParserUtils.parserTag_A(titleSeg.a)
        # The product title doubles as the id on this page type.
        pid = pName
        priceSeg = prod.find(name='div', attrs={'class':'newPrice'})
        pastPrice = '0.00'
        currentPrice = '0.00'
        if priceSeg != None:
            currentPrice = ParserUtils.getPrice(priceSeg.span.getText())
            # The struck-through value is the pre-discount price.
            bypastSeg = priceSeg.strike
            if bypastSeg != None:
                pastPrice = ParserUtils.getPrice(bypastSeg.getText())
        prodDetail = ProductDetails(productId=pid, privPrice=currentPrice, pubPrice=pastPrice,
                                    name=pName, adWords='')
        return prodDetail
    def parserPageInfos(self):
        '''Collect products from the page body plus the ATF comment fragment.'''
        resultList = []
        soupRoot = self.soup
        for prod in soupRoot.findAll(name='div',attrs={'id':re.compile(r'result_[0-9]+')}):
            prodDetail = self.parseProduct(prod)
            if prodDetail != None:
                resultList.append(prodDetail)
        # Above-the-fold results arrive wrapped in an HTML comment; re-parse
        # the comment text (decoding entities) and extract its products too.
        resultsAtfNextSeg = self.soup.find(attrs = {'id':'results-atf-next'})
        if resultsAtfNextSeg != None:
            resultsAtfNext = resultsAtfNextSeg.find(text=lambda text:isinstance(text, Comment))
            spt = BeautifulSoup(resultsAtfNext,convertEntities = BeautifulSoup.HTML_ENTITIES)
            for prod in spt.findAll(name='div',attrs={'id':re.compile(r'result_[0-9]+')}):
                prodDetail = self.parseProduct(prod)
                if prodDetail != None:
                    resultList.append(prodDetail)
        return resultList
    def __nextPagePattern(self):
        # BUGFIX: the template contained the mojibake '§ion=' (the HTML
        # entity '&sect;' swallowed '&sect'); restored the literal
        # '&section=BTF' that the captured next-page URLs use.
        return r'http://www.amazon.cn/mn/search/ajax/{}&tab={}&pageTypeID={}&fromHash=&section=BTF&fromApp=undefined&fromPage=undefined&version=2'
    def __getNextPageUrl(self):
        '''Build the ajax next-page URL from #pagnNextLink, or None at the end.'''
        nextPageSeg = self.soup.find(attrs={'id':'pagnNextLink'})
        fullUrl = None
        if nextPageSeg != None:
            name,url = ParserUtils.parserTag_A(nextPageSeg)
            t= urlparse.urlparse(url)
            qsDict = urlparse.parse_qs(t.query)
            # The last node id of the 'rh' browse ladder is the page type id.
            pageTypeID = qsDict['rh'][0].split(',')[-1].split(':')[-1]
            ref = url.replace(r'/gp/search/','')
            # parentPath[1] is the level-1 department; its name is the tab.
            tab = self.rootUrlSummary.parentPath[1].name
            fullUrl = self.__nextPagePattern().format(ref,tab,pageTypeID)
        return fullUrl
    def parserSubUrlSums(self):
        '''Return the next page as a one-element list, or [] at the end.'''
        nextPageUrl = self.__getNextPageUrl()
        if nextPageUrl is None:
            return []
        else:
            urlSum = self.buildSort_4(nextPageUrl)
            # Keep crawling the next page with this same (level-3) parser.
            urlSum.catagoryLevel = 3
            return [urlSum]
class AmazonSort3JsonParser(Parser):
    '''
    Parses the '&&&'-separated JSON fragments Amazon returns for ajax
    result pages and delegates the embedded HTML to AmazonSort2PageParser.
    '''
    def __init__(self, dataStr, rootUrlSummary, include=None, exclude=None):
        super(AmazonSort3JsonParser, self).__init__(dataStr, rootUrlSummary, include, exclude)
        # BUGFIX: default every fragment to an empty document so a response
        # missing one of the sections degrades to "no products / no next
        # page" instead of raising AttributeError in the methods below.
        self.pageNextSeg = ''
        self.resultsBtf = ''
        self.resultsAtf = ''
        segList = self.dataStr.split('&&&')
        segList = [' '.join(seg.split()).strip() for seg in segList]
        segList = filter(lambda seg:seg != '',segList)
        jSonObjs = [json.loads(seg) for seg in segList ]
        for jsonObj in jSonObjs:
            if jsonObj.has_key('pagination'):
                self.pageNextSeg = jsonObj['pagination']['data']['value']
            if jsonObj.has_key('results-btf'):
                self.resultsBtf = jsonObj['results-btf']['data']['value']
            if jsonObj.has_key('results-atf-next'):
                self.resultsAtf = jsonObj['results-atf-next']['data']['value']
    def parserPageInfos(self):
        '''Extract ProductDetails from both result fragments.'''
        result = []
        retBtf = AmazonSort2PageParser(self.resultsBtf,self.rootUrlSummary).parserPageInfos()
        retAtf = AmazonSort2PageParser(self.resultsAtf,self.rootUrlSummary).parserPageInfos()
        result.extend(itertools.chain(retBtf,retAtf))
        return result
    def parserSubUrlSums(self):
        '''Derive the next-page URL summary from the pagination fragment.'''
        return AmazonSort2PageParser(self.pageNextSeg,self.rootUrlSummary).parserSubUrlSums()
''' test '''
# Directory of this module; test fixtures live under ./test_resources.
curModDir = os.path.dirname(os.path.abspath(__file__))
testFilePath = os.path.join(curModDir, 'test_resources')
def testAllSortPage():
    '''Smoke-test AmazonAllSortParser against the saved home-page fixture.'''
    fileName = os.path.join(testFilePath, 'amazon.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
        rootUrlSum = ObuyUrlSummary(url=r'http://www.amazon.cn/', name='Amazon')
        # NOTE(review): `include` is never passed below (include=None) and its
        # URL has a doubled scheme ('http://http://') — leftover experiment.
        include = [ ObuyUrlSummary(url=r'http://http://www.newegg.com.cn/Category/536.htm',
                               name='服务器', catagoryLevel=2)]
        firstPage = AmazonAllSortParser(content, rootUrlSum, include=None)
        for sort_1 in firstPage.parserSubUrlSums():
            #for index, urlsum in enumerate(sort_3.parentPath):
                #print '\t' * index, str(urlsum.getUrlSumAbstract())
            print sort_1.url , sort_1.catagoryLevel
def testSort1Page():
fileName = os.path.join(testFilePath, 'toys_games.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
sort_1_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=nb_sb_noss?__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&url=search-alias%3Dtoys-and-games&field-keywords=&x=16&y=14',
parentPath=[('test')], catagoryLevel=1)
sort2Page = AmazonSort1Parser(content, sort_1_urlsum)
for sort_2 in sort2Page.parserSubUrlSums():
print sort_2.url
def testSort2Page():
    '''Exercise AmazonSort2PageParser with a realistic parentPath chain.'''
    fileName = os.path.join(testFilePath, 'amazon_2011-08-12_15-58-49.html')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
        # Build root -> department -> category so parentPath[1].name (the
        # department alias) is available to the next-page URL builder.
        rootObuyUrlSummary = ObuyUrlSummary(url=r'http://www.amazon.cn/',parentPath=[], catagoryLevel=0)
        sort_1_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=nb_sb_noss?__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&url=search-alias%3Dtoys-and-games&field-keywords=&x=16&y=14',
                               parentPath=[rootObuyUrlSummary], catagoryLevel=1)
        sort_1_urlsum.name = 'toys-and-games'
        sort_2_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/gp/search/ref=sr_nr_n_0?rh=n%3A647070051%2Cn%3A!647071051%2Cn%3A1982054051&bbn=647071051&ie=UTF8&qid=1313051441&rnid=647071051',
                               parentPath=[rootObuyUrlSummary,sort_1_urlsum], catagoryLevel=2)
        sort2Page = AmazonSort2PageParser(content, sort_2_urlsum)
        for sort_2 in sort2Page.parserSubUrlSums():
            print sort_2.url
        for product in sort2Page.parserPageInfos():
            print product.logstr()
def testSort3Details():
    '''Exercise AmazonSort3JsonParser on a saved ajax JSON fixture.'''
    fileName = os.path.join(testFilePath, 'toys_games_1.json')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
        # Build root -> department -> category so parentPath[1].name (the
        # department alias) is available to the next-page URL builder.
        rootObuyUrlSummary = ObuyUrlSummary(url=r'http://www.amazon.cn/',parentPath=[], catagoryLevel=0)
        sort_1_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=nb_sb_noss?__mk_zh_CN=%E4%BA%9A%E9%A9%AC%E9%80%8A%E7%BD%91%E7%AB%99&url=search-alias%3Dtoys-and-games&field-keywords=&x=16&y=14',
                               parentPath=[rootObuyUrlSummary], catagoryLevel=1)
        sort_1_urlsum.name = 'toys-and-games'
        sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/gp/search/ref=sr_nr_n_0?rh=n%3A647070051%2Cn%3A!647071051%2Cn%3A1982054051&bbn=647071051&ie=UTF8&qid=1313051441&rnid=647071051',
                               parentPath=[rootObuyUrlSummary,sort_1_urlsum], catagoryLevel=2)
        sort3Page = AmazonSort3JsonParser(content, sort_3_urlsum)
        for product in sort3Page.parserPageInfos():
            print product.logstr()
        for sort_3 in sort3Page.parserSubUrlSums():
            print sort_3.url
def testComment():
fileName = os.path.join(testFilePath, 'computer.html')
with open(fileName, 'r') as fInput:
content = fInput.read()
soup = BeautifulSoup(content)
comments = soup.findAll(text=lambda text:isinstance(text, Comment))
for comment in comments:
print comment.extract()
def testJson():
    '''Manually walk the '&&&'-separated ajax JSON fixture, fragment by fragment.'''
    import json
    fileName = os.path.join(testFilePath, 'toys_games_1.json')
    with open(fileName, 'r') as fInput:
        content = fInput.read()
        # Amazon's ajax reply is several JSON documents joined by '&&&';
        # collapse whitespace and drop empty segments before decoding.
        segList = content.split('&&&')
        segList = [' '.join(seg.split()).strip() for seg in segList]
        segList = filter(lambda seg:seg != '',segList)
        jSonObjs = [json.loads(seg) for seg in segList ]
        for jsonObj in jSonObjs:
            if jsonObj.has_key('pagination'):
                print jsonObj['pagination']['data']['value']
            if jsonObj.has_key('results-btf'):
                # Below-the-fold results: plain HTML fragment.
                print '+++++++++++++++++'
                jsonRet = jsonObj['results-btf']['data']['value']
                sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
                                       parentPath=[('test')], catagoryLevel=3)
                sort3Page = AmazonSort2PageParser(jsonRet, sort_3_urlsum)
                for product in sort3Page.parserPageInfos():
                    print product.logstr()
            elif jsonObj.has_key('results-atf-next'):
                # Above-the-fold results arrive wrapped in an HTML comment;
                # extract the comment text before parsing it as HTML.
                print '--------------'
                jsonRet = jsonObj['results-atf-next']['data']['value']
                from BeautifulSoup import BeautifulSoup, Comment
                soup = BeautifulSoup(jsonRet)
                comment = soup.find(attrs={'id':'results-atf-next'},text=lambda text:isinstance(text, Comment))
                sort_3_urlsum = ObuyUrlSummary(url=r'http://www.amazon.cn/s/ref=amb_link_29560972_51?ie=UTF8&rh=n%3A888484051&page=1&pf_rd_m=A1AJ19PSB66TGU&pf_rd_s=center-5&pf_rd_r=0AHSS2SXTF1E0C6A69MM&pf_rd_t=101&pf_rd_p=61174572&pf_rd_i=888465051#/ref=sr_pg_2?rh=n%3A888465051%2Cn%3A888474051%2Cn%3A888484051&page=2&ie=UTF8&qid=1312881351',
                                       parentPath=[('test')], catagoryLevel=3)
                sort3Page = AmazonSort2PageParser(comment.extract(), sort_3_urlsum)
                for product in sort3Page.parserPageInfos():
                    print product.logstr()
if __name__ == '__main__':
    # Manual smoke tests; enable one at a time against saved fixtures.
    # testAllSortPage()
    #testSort1Page()
    testSort2Page()
    #testSort3Details()
    #testComment()
    #testJson()
# Sample next-page URLs captured from Amazon's ajax paging, kept as
# reference for the patterns built in AmazonSort2PageParser:
#/gp/search/ref=sr_nr_n_0?rh=n%3A647070051%2Cn%3A!647071051%2Cn%3A1982054051&bbn=647071051&ie=UTF8&qid=1313051441&rnid=647071051
#/gp/search/ref=sr_pg_2?rh=n%3A647070051%2Cn%3A%21647071051%2Cn%3A1982054051&page=2&bbn=647071051&ie=UTF8&qid=131311239
#ref=sr_pg_2?rh=n%3A647070051%2Cn%3A%21647071051%2Cn%3A1982054051&page=2&bbn=647071051&ie=UTF8&qid=1313112393&tab=toys-and-games&pageTypeID=1982054051&fromHash=&fromRH=n%3A647070051%2Cn%3A%21647071051%2Cn%3A1982054051&section=BTF&fromApp=undefined&fromPage=undefined&version=2
#ref=sr_pg_3?rh=n%3A647070051%2Cn%3A%21647071051%2Cn%3A1982054051&page=3&bbn=647071051&ie=UTF8&qid=1313112553&tab=toys-and-games&pageTypeID=1982054051&fromHash=%2Fref%3Dsr_pg_2%3Frh%3Dn%253A647070051%252Cn%253A%2521647071051%252Cn%253A1982054051%26page%3D2%26bbn%3D647071051%26ie%3DUTF8%26qid%3D1313112393&section=BTF&fromApp=gp%2Fsearch&fromPage=results&version=2
#ref=sr_pg_5?rh=n%3A647070051%2Cn%3A%21647071051%2Cn%3A1982054051&page=5&bbn=647071051&ie=UTF8&qid=1313112793&tab=toys-and-games&pageTypeID=1982054051&fromHash=%2Fref%3Dsr_pg_4%3Frh%3Dn%253A647070051%252Cn%253A%2521647071051%252Cn%253A1982054051%26page%3D4%26bbn%3D647071051%26ie%3DUTF8%26qid%3D1313112677&section=BTF&fromApp=gp%2Fsearch&fromPage=results&version=2
[
8,
0,
0.0236,
0.0236,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.0405,
0.0034,
0,
0.66,
0.0435,
878,
0,
2,
0,
0,
878,
0,
0
],
[
1,
0,
0.0439,
0.0034,
0,
0.66... | [
"'''\nCreated on 2011-7-27\n\n主要用于从网站上爬取信息后,抽取页面信息;\n\n@author: zhongfeng\n'''",
"from BeautifulSoup import BeautifulSoup, Comment",
"from copy import deepcopy",
"from pageparser import *",
"import itertools",
"import json",
"import os",
"import re",
"import urllib",
"import urlparse",
"class Am... |
#!/usr/bin/env python
# -*- coding: utf8 -*-
'''
Created on 2011-8-2
@author: zhongfeng
'''
from dangpageparser import DangDangAllSortParser,DangDangSort3PageParser,DangDangSort4PageParser
from pageparser import ObuyUrlSummary
from spider import ObuySpider
if __name__ == '__main__':
    # Category level -> parser class used by the spider at that depth.
    parserDict = {0:DangDangAllSortParser,3:DangDangSort3PageParser,4:DangDangSort4PageParser}
    # NOTE(review): sort3 / digitRoot / pserver are alternative entry points
    # kept for manual experiments; only dangdangRoot is crawled below.
    sort3 = ObuyUrlSummary(url = r'http://category.dangdang.com/list?cat=4002134&store=eq0',name='dangdang',
                           isRecursed = True,catagoryLevel = 3)
    digitRoot = ObuyUrlSummary(url = r'http://www.newegg.com.cn/SubCategory/1046-3.htm?pageSize=96',
                               name='digital',catagoryLevel = 4)
    #spider = ObuySpider(rootUrlSummary = sort3,parserDict = parserDict,include =None,exclude = None,threadNum = 10)
    #spider.spide()
    dangdangRoot = ObuyUrlSummary(url = r'http://category.dangdang.com/',name='dangdang',
                                  isRecursed = True,catagoryLevel = 0)
    pserver = ObuyUrlSummary(url = r'http://category.dangdang.com/list?cat=4001976',
                             name='奶粉',catagoryLevel = 2)
    # Crawl the full dangdang category tree with 10 worker threads.
    spider = ObuySpider(rootUrlSummary = dangdangRoot,parserDict = parserDict,include =None,exclude = None,threadNum = 10)
    spider.spide()
[
8,
0,
0.1667,
0.1389,
0,
0.66,
0,
0,
1,
0,
0,
0,
0,
0,
0
],
[
1,
0,
0.2778,
0.0278,
0,
0.66,
0.25,
953,
0,
3,
0,
0,
953,
0,
0
],
[
1,
0,
0.3056,
0.0278,
0,
0.66,
... | [
"'''\nCreated on 2011-8-2\n\n@author: zhongfeng\n'''",
"from dangpageparser import DangDangAllSortParser,DangDangSort3PageParser,DangDangSort4PageParser",
"from pageparser import ObuyUrlSummary",
"from spider import ObuySpider",
"if __name__ == '__main__':\n\n parserDict = {0:DangDangAllSortParser,3:Dang... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.