middlewares.py 11 KB

# Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
import re
import hashlib
import time
import urllib.parse
from urllib.parse import urlencode

from scrapy import signals

# useful for handling different item types with a single interface
# from itemadapter import is_item, ItemAdapter
from content_spider.Util import hashUtil
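
# hashUtil is a project-local helper that is not shown in this file. Based on how it
# is used below (hashUtil.md5(sign_string) returning a hex digest string), a minimal
# equivalent is assumed to look like this:
#
#     # content_spider/Util/hashUtil.py (assumed sketch)
#     import hashlib
#
#     def md5(string):
#         return hashlib.md5(string.encode('utf-8')).hexdigest()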


class ContentSpiderSpiderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the spider middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_spider_input(self, response, spider):
        # Called for each response that goes through the spider
        # middleware and into the spider.
        # Should return None or raise an exception.
        return None

    def process_spider_output(self, response, result, spider):
        # Called with the results returned from the Spider, after
        # it has processed the response.
        # Must return an iterable of Request, or item objects.
        for i in result:
            yield i

    def process_spider_exception(self, response, exception, spider):
        # Called when a spider or process_spider_input() method
        # (from other spider middleware) raises an exception.
        # Should return either None or an iterable of Request or item objects.
        pass

    def process_start_requests(self, start_requests, spider):
        # Called with the start requests of the spider, and works
        # similarly to the process_spider_output() method, except
        # that it doesn't have a response associated.
        # Must return only requests (not items).
        for r in start_requests:
            yield r

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
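
# A minimal sketch of how this spider middleware could be enabled in the project's
# settings.py, assuming this file lives at content_spider/middlewares.py (the
# priority value 543 is the Scrapy template default, used here only as an
# illustrative assumption):
#
#     SPIDER_MIDDLEWARES = {
#         'content_spider.middlewares.ContentSpiderSpiderMiddleware': 543,
#     }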


class ContentSpiderDownloaderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Called for each request that goes through the downloader
        # middleware.
        # Must either:
        # - return None: continue processing this request
        # - or return a Response object
        # - or return a Request object
        # - or raise IgnoreRequest: process_exception() methods of
        #   installed downloader middleware will be called
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
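
# A minimal sketch (assumed filter logic, not part of this project) of how a
# downloader middleware can short-circuit a request by raising IgnoreRequest, as
# described in the comments above:
#
#     from scrapy.exceptions import IgnoreRequest
#
#     def process_request(self, request, spider):
#         if request.url.endswith('.pdf'):  # hypothetical filter condition
#             raise IgnoreRequest('skipping non-HTML document')
#         return None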


class LianshangSpiderDownloaderMiddleware:
    # Not all methods need to be defined. If a method is not defined,
    # scrapy acts as if the downloader middleware does not modify the
    # passed objects.

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Rewrite the oauth_timestamp query parameter to the current epoch time
        # before the request is downloaded. The private _url attribute is modified
        # in place instead of returning a replacement Request.
        url = request._url
        ts = int(time.time())
        request._url = re.sub(r'oauth_timestamp=\d+', 'oauth_timestamp={}'.format(ts), url)
        return None

    def process_response(self, request, response, spider):
        # Called with the response returned from the downloader.
        # Must either:
        # - return a Response object
        # - return a Request object
        # - or raise IgnoreRequest
        return response

    def process_exception(self, request, exception, spider):
        # Called when a download handler or a process_request()
        # (from other downloader middleware) raises an exception.
        # Must either:
        # - return None: continue processing this exception
        # - return a Response object: stops process_exception() chain
        # - return a Request object: stops process_exception() chain
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)
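
# Illustration of the rewrite performed above (the URL is hypothetical):
#
#     before: https://example.com/api/book?oauth_timestamp=1500000000&bookId=1
#     after : https://example.com/api/book?oauth_timestamp=<current time()>&bookId=1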


class WangyiSpiderDownloaderMiddleware:
    consumerKey = "58434765"
    secretKey = "AECnczs1GpBGDSXz"

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Re-sign the request URL with a fresh timestamp before it is downloaded.
        url = request._url
        request._url = self.re_sign(url)
        return None

    def process_response(self, request, response, spider):
        return response

    def process_exception(self, request, exception, spider):
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)

    def get_sign(self, primary_url, param):
        # Build the string to sign: 'GET' + base URL + sorted 'key=value' pairs
        # + secretKey, URL-quote it, then take the MD5 hex digest.
        url = 'GET' + primary_url
        param = sorted(param.items(), key=lambda x: x[0])
        string = ''
        m = hashlib.md5()
        for item in param:
            string = string + '{}={}'.format(str(item[0]), str(item[1]))
        string = url + string + self.secretKey
        string = urllib.parse.quote(string, '')
        m.update(string.encode('utf-8'))
        sign = m.hexdigest()
        return sign

    def re_sign(self, url):
        # Split the URL, refresh the timestamp/expires parameters, drop the old
        # sign, and append a newly computed sign.
        res = urllib.parse.urlsplit(url)
        simple_url = '{}://{}{}'.format(res[0], res[1], res[2])
        query_param = urllib.parse.parse_qs(res[3])
        timestamp = int(time.time() * 1000)
        param = {}
        for item in query_param:
            if item == 'timestamp':
                param['timestamp'] = timestamp
            elif item == 'expires':
                param['expires'] = timestamp + 10 * 60 * 1000
            elif item == 'sign':
                continue
            else:
                param[item] = query_param[item][0]
        param['sign'] = self.get_sign(simple_url, param)
        return simple_url + '?' + urlencode(param)
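
# Example of the string that get_sign() hashes, for a hypothetical base URL and the
# parameters {'bookId': '7', 'timestamp': 1600000000000} (keys sorted, no separators):
#
#     raw    = 'GEThttps://open.example.com/chapterbookId=7timestamp=1600000000000AECnczs1GpBGDSXz'
#     quoted = urllib.parse.quote(raw, '')
#     sign   = hashlib.md5(quoted.encode('utf-8')).hexdigest()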


class BimoSpiderDownloaderMiddleware:
    api_key = 'ZmfJQZaF8FuQSuUx'
    api_secret = 'JgBETPKMPgsRBqlNtKajwQf4zFQuMwYr'

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Re-sign the request URL with a fresh 'time' parameter before download.
        url = request._url
        request._url = self.re_sign(url)
        return None

    def process_response(self, request, response, spider):
        return response

    def process_exception(self, request, exception, spider):
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)

    def re_sign(self, url):
        # Rebuild the query string with a current 'time', a new 'sign' and
        # signType=MD5, dropping any stale sign/signType/time parameters.
        res = urllib.parse.urlsplit(url)
        simple_url = '{}://{}{}'.format(res[0], res[1], res[2])
        query_param = urllib.parse.parse_qs(res[3])
        param = dict(time=int(time.time()))
        for item in query_param:
            if item in ('sign', 'signType', 'time'):
                continue
            param[item] = query_param[item][0]
        param['sign'] = self.sign(param)
        param['signType'] = 'MD5'
        return simple_url + '?' + urlencode(param)

    def sign(self, param):
        # Sort parameters by key, join them as 'key=value&' pairs, append
        # 'apiSecret=<api_secret>' and MD5 the result.
        param = sorted(param.items(), key=lambda x: x[0])
        string = ''
        for item in param:
            string = string + str(item[0]) + '=' + str(item[1]) + '&'
        string = string + 'apiSecret=' + self.api_secret
        return hashUtil.md5(string)
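
# Example of the string that sign() hashes, for the hypothetical parameters
# {'bookId': '9', 'time': 1600000000} (keys are sorted, then apiSecret is appended):
#
#     bookId=9&time=1600000000&apiSecret=JgBETPKMPgsRBqlNtKajwQf4zFQuMwYr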


class MotieSpiderDownloaderMiddleware:
    customerId = '922'
    customerSecret = 'kQSdaERniXSxzr20IJgtkvru1nSLFLjR'

    @classmethod
    def from_crawler(cls, crawler):
        # This method is used by Scrapy to create your spiders.
        s = cls()
        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
        return s

    def process_request(self, request, spider):
        # Re-sign the request URL; only replace it when re_sign() produced a result.
        url = request._url
        new_url = self.re_sign(url)
        if new_url:
            request._url = new_url
        return None

    def process_response(self, request, response, spider):
        return response

    def process_exception(self, request, exception, spider):
        pass

    def spider_opened(self, spider):
        spider.logger.info('Spider opened: %s' % spider.name)

    def re_sign(self, url):
        # Rebuild the query string with a fresh millisecond timestamp and a sign
        # whose layout depends on the API path being requested.
        res = urllib.parse.urlsplit(url)
        simple_url = '{}://{}{}'.format(res[0], res[1], res[2])
        query_param = urllib.parse.parse_qs(res[3])
        timestamp = int(time.time()) * 1000
        param = {}
        sign_string = ''
        # The book list endpoint is unsigned and only needs customerId.
        if res[2] == '/api/motie/get/book':
            return simple_url + '?customerId={}'.format(query_param['customerId'][0])
        for item in query_param:
            if item == 'timestamp' or item == 'sign':
                continue
            param[item] = query_param[item][0]
        param['timestamp'] = timestamp
        if res[2] == '/api/motie/get/bookinfo' or res[2] == '/api/motie/get/chapterlist':
            sign_string = '{}#{}#{}#{}'.format(
                query_param['bookId'][0],
                query_param['customerId'][0],
                timestamp, self.customerSecret)
        if res[2] == '/api/motie/get/chapter':
            sign_string = '{}#{}#{}#{}#{}'.format(
                query_param['bookId'][0],
                query_param['chapterId'][0],
                query_param['customerId'][0],
                timestamp, self.customerSecret)
        if sign_string == '':
            return None
        param['sign'] = hashUtil.md5(sign_string)
        return simple_url + '?' + urlencode(param)
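
# A minimal sketch of how the site-specific downloader middlewares above might be
# enabled in the project's settings.py, assuming this file lives at
# content_spider/middlewares.py (the priority value 543 is the Scrapy template
# default and is only an illustrative assumption):
#
#     DOWNLOADER_MIDDLEWARES = {
#         'content_spider.middlewares.WangyiSpiderDownloaderMiddleware': 543,
#     }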