#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
@Author :liuyuqi.gov@msn.cn
@Time   :2018/4/11 1:07
@File   :main.py
'''
import re
import bs4
import urllib.request

url_home = 'http://menshijian8.com'  # site to crawl
url_set = set()        # URLs queued for crawling
url_cache = set()      # URLs already visited
url_count = 0
url_maxCount = 1000000  # maximum number of pages to collect
url_pattern = url_home + r'([\s\S]*)\.html'  # regex matching article pages; could be tightened further

# Fetch a page and queue every href that matches the article pattern
def spiderURL(url, pattern):
    html = urllib.request.urlopen(url).read().decode('utf8')
    soup = bs4.BeautifulSoup(html, 'html.parser')
    links = soup.find_all('a', href=re.compile(pattern))
    for link in links:
        # only queue non-empty links that have not been visited yet
        if link['href'] not in url_cache and link['href'] != '':
            url_set.add(link['href'])
    return soup
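
# Sketch (not used by the crawler below): some sites reject urllib's default
# user agent. A common workaround is to send a browser-like User-Agent header
# via urllib.request.Request; the header value here is illustrative only.
def fetch_html_with_headers(url):
    req = urllib.request.Request(
        url,
        headers={'User-Agent': 'Mozilla/5.0 (compatible; simple-spider/0.1)'}
    )
    return urllib.request.urlopen(req).read().decode('utf8')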

def findImages(dom):
    # placeholder: image extraction is not implemented yet
    img = ""
    return img
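
# Sketch (not wired into the crawler): one way findImages could be filled in is
# to collect the src attribute of every <img> tag in the parsed page. This
# assumes image URLs sit in plain src attributes, which lazy-loaded pages may not use.
def findImagesSketch(dom):
    return [img.get('src') for img in dom.find_all('img') if img.get('src')]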

def crawl():
    # Crawling loop. Exception handling is still rough, and sites with
    # anti-scraping measures also need request headers; we will cover that next time.
    global url_count
    spiderURL(url_home, url_pattern)
    while len(url_set) != 0:
        try:
            url = url_set.pop()
            url_cache.add(url)
            soup = spiderURL(url, url_pattern)
            page = soup.find('div', {'class': 'content'})
            title = page.find('h1').get_text()
            author = page.find('h4').get_text()
            content = page.find('article').get_text()
            print(title, author, url)
        except Exception as e:
            print(url, e)
            continue
        else:
            url_count += 1
        if url_count == url_maxCount:
            break
    print('Collected ' + str(url_count) + ' items in total')

if __name__ == '__main__':
    crawl()