# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
import re
import hashlib
import time
import urllib.parse
from urllib.parse import urlencode

from scrapy import signals

# useful for handling different item types with a single interface
# from itemadapter import is_item, ItemAdapter
from content_spider.Util import hashUtil


class ContentSpiderSpiderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, or item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Request or item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)


class ContentSpiderDownloaderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
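

# To activate these middlewares, entries along the lines of the sketch
# below go into the project's settings.py. The priority values are
# illustrative defaults, not values taken from this project:
#
#   SPIDER_MIDDLEWARES = {
#       'content_spider.middlewares.ContentSpiderSpiderMiddleware': 543,
#   }
#   DOWNLOADER_MIDDLEWARES = {
#       'content_spider.middlewares.ContentSpiderDownloaderMiddleware': 543,
#   }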


class LianshangSpiderDownloaderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Refresh the oauth_timestamp query parameter so the request is
        # downloaded with a current timestamp. Request URLs are immutable
        # through the public API, so the private _url attribute is written
        # directly to rewrite the request in place.
        ts = int(time.time())
        request._url = re.sub(r'oauth_timestamp=\d+',
                              'oauth_timestamp={}'.format(ts), request.url)
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
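

# For illustration (a made-up URL, not a real endpoint): a queued request
# for
#   https://api.example.com/list?oauth_timestamp=1500000000&page=1
# leaves LianshangSpiderDownloaderMiddleware as
#   https://api.example.com/list?oauth_timestamp=<current epoch seconds>&page=1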


class WangyiSpiderDownloaderMiddleware:
    consumerKey = "58434765"
    secretKey = "AECnczs1GpBGDSXz"

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Re-sign the URL in place before download (see the note on the
        # private _url attribute in LianshangSpiderDownloaderMiddleware).
        request._url = self.re_sign(request.url)
        return None

    def process_response(self, request, response, spider):
        return response

    def process_exception(self, request, exception, spider):
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)

    def get_sign(self, primary_url, param):
        # Build the string to sign: 'GET' + bare URL + the query
        # parameters concatenated as 'k=v' in ascending key order +
        # the secret key, URL-quoted, then MD5-hashed.
        url = 'GET' + primary_url
        param = sorted(param.items(), key=lambda x: x[0])
        string = ''
        m = hashlib.md5()
        for item in param:
            string = string + '{}={}'.format(str(item[0]), str(item[1]))
        string = url + string + self.secretKey
        string = urllib.parse.quote(string, '')
        m.update(string.encode('utf-8'))
        return m.hexdigest()

    def re_sign(self, url):
        # Rebuild the query string with a fresh millisecond timestamp, a
        # matching expiry ten minutes later, and a recomputed signature.
        res = urllib.parse.urlsplit(url)
        simple_url = '{}://{}{}'.format(res[0], res[1], res[2])
        query_param = urllib.parse.parse_qs(res[3])
        timestamp = int(time.time() * 1000)
        param = {}
        for item in query_param:
            if item == 'timestamp':
                param['timestamp'] = timestamp
            elif item == 'expires':
                param['expires'] = timestamp + 10 * 60 * 1000
            elif item == 'sign':
                continue  # the stale signature is recomputed below
            else:
                param[item] = query_param[item][0]
        param['sign'] = self.get_sign(simple_url, param)
        return simple_url + '?' + urlencode(param)
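

# Signing recipe used by WangyiSpiderDownloaderMiddleware, as implemented
# in get_sign() above (parameter names are whatever the signed URL carries):
#   sign = md5(urlquote('GET' + bare_url + 'k1=v1' + 'k2=v2' + ... + secretKey))
# with the query parameters concatenated in ascending key order and any
# stale `sign` parameter dropped before re-signing.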


class BimoSpiderDownloaderMiddleware:
    api_key = 'ZmfJQZaF8FuQSuUx'
    api_secret = 'JgBETPKMPgsRBqlNtKajwQf4zFQuMwYr'

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Re-sign the URL in place before it is downloaded.
        request._url = self.re_sign(request.url)
        return None

    def process_response(self, request, response, spider):
        return response

    def process_exception(self, request, exception, spider):
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)

    def re_sign(self, url):
        # Rebuild the query string with a fresh `time` parameter and a
        # recomputed MD5 signature; stale sign/signType/time values are
        # dropped first.
        res = urllib.parse.urlsplit(url)
        simple_url = '{}://{}{}'.format(res[0], res[1], res[2])
        query_param = urllib.parse.parse_qs(res[3])
        param = dict(time=int(time.time()))
        for item in query_param:
            if item in ('sign', 'signType', 'time'):
                continue
            param[item] = query_param[item][0]
        param['sign'] = self.sign(param)
        param['signType'] = 'MD5'
        return simple_url + '?' + urlencode(param)

    def sign(self, param):
        # Sort the parameters by key, join them as 'k=v&' pairs, append
        # the API secret, and hash the result.
        param = sorted(param.items(), key=lambda x: x[0])
        string = ''
        for item in param:
            string = string + str(item[0]) + '=' + str(item[1]) + '&'
        string = string + 'apiSecret=' + self.api_secret
        return hashUtil.md5(string)
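

# Bimo's scheme differs from Wangyi's: parameters are sorted by key and
# joined as 'k1=v1&k2=v2&...&', 'apiSecret=<api_secret>' is appended, and
# the MD5 of that string is sent as `sign` alongside signType=MD5.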


class MotieSpiderDownloaderMiddleware:
    customerId = '922'
    customerSecret = 'kQSdaERniXSxzr20IJgtkvru1nSLFLjR'

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # re_sign() returns None for endpoints it does not know how to
        # sign; only rewrite the URL when a signed one was produced.
        new_url = self.re_sign(request.url)
        if new_url:
            request._url = new_url
        return None

    def process_response(self, request, response, spider):
        return response

    def process_exception(self, request, exception, spider):
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)

    def re_sign(self, url):
        # Rebuild the query string with a fresh millisecond timestamp and
        # an endpoint-specific MD5 signature over '#'-joined fields.
        res = urllib.parse.urlsplit(url)
        simple_url = '{}://{}{}'.format(res[0], res[1], res[2])
        query_param = urllib.parse.parse_qs(res[3])
        timestamp = int(time.time()) * 1000
        param = {}
        sign_string = ''
        # The book-list endpoint is unsigned and only needs customerId.
        if res[2] == '/api/motie/get/book':
            return simple_url + '?customerId={}'.format(query_param['customerId'][0])
        for item in query_param:
            if item == 'timestamp' or item == 'sign':
                continue
            param[item] = query_param[item][0]
        param['timestamp'] = timestamp
        if res[2] == '/api/motie/get/bookinfo' or res[2] == '/api/motie/get/chapterlist':
            sign_string = '{}#{}#{}#{}'.format(
                query_param['bookId'][0],
                query_param['customerId'][0],
                timestamp, self.customerSecret)
        if res[2] == '/api/motie/get/chapter':
            sign_string = '{}#{}#{}#{}#{}'.format(
                query_param['bookId'][0],
                query_param['chapterId'][0],
                query_param['customerId'][0],
                timestamp, self.customerSecret)
        if sign_string == '':
            return None  # unknown endpoint: leave the URL untouched
        param['sign'] = hashUtil.md5(sign_string)
        return simple_url + '?' + urlencode(param)
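

if __name__ == '__main__':
    # Minimal smoke test for the re-signing helpers, assuming the
    # content_spider package is importable. The URLs are hypothetical
    # examples, not real endpoints of these services.
    wangyi = WangyiSpiderDownloaderMiddleware()
    print(wangyi.re_sign(
        'https://api.example.com/books?timestamp=0&expires=0&sign=x&page=1'))
    motie = MotieSpiderDownloaderMiddleware()
    print(motie.re_sign(
        'https://api.example.com/api/motie/get/bookinfo'
        '?bookId=1&customerId=922&timestamp=0&sign=x'))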