huangkq / Python-100-Days · Commits

Commit b3a88734 authored May 29, 2018 by jackfrued
Added the day-2 crawler code
parent e4204ed9
Showing 5 changed files with 179 additions and 7 deletions
Day66-75/02.数据采集和解析.md (+40 −2)
Day66-75/03.存储数据.md (+9 −3)
Day66-75/code/example02.py (+16 −2)
Day66-75/code/example04.py (+31 −0)
Day66-75/code/example05.py (+83 −0)
Day66-75/02.数据采集和解析.md
@@ -4,8 +4,9 @@
1. Download the data - urllib / requests / aiohttp.
2. Parse the data - re / lxml / beautifulsoup4 (bs4) / pyquery.
- 3. Persistence - pymysql / redis / sqlalchemy / peewee / pymongo.
- 4. Scheduling - processes / threads / coroutines.
+ 3. Caching and persistence - pymysql / redis / sqlalchemy / peewee / pymongo.
+ 4. Serialization and compression - pickle / json / zlib (see the sketch after this list).
+ 5. Scheduling - processes / threads / coroutines.
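Item 4 above bundles serialization with compression. As a point of reference, a minimal sketch of that step could look like the following; the sample `page` dictionary and the size printout are illustrative assumptions, not code from this chapter.

```Python
import pickle
import zlib

# A made-up crawl result, used only to illustrate the round trip.
page = {
    'url': 'https://www.zhihu.com/explore',
    'html': '<html><body><h1>Hello, world!</h1></body></html>' * 100,
}

# Serialize the object to bytes, then compress those bytes.
serialized = pickle.dumps(page)
compressed = zlib.compress(serialized)
print('raw: %d bytes, compressed: %d bytes' % (len(serialized), len(compressed)))

# Decompress and deserialize to recover the original object.
restored = pickle.loads(zlib.decompress(compressed))
assert restored == page
```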
### HTML Page Analysis
@@ -86,3 +87,40 @@
> Note: see the BeautifulSoup [official documentation]() for more details.

### Example - Collecting question links from Zhihu Explore
```Python
from urllib.parse import urljoin

import re
import requests

from bs4 import BeautifulSoup


def main():
    headers = {'user-agent': 'Baiduspider'}
    proxies = {
        'http': 'http://122.114.31.177:808'
    }
    base_url = 'https://www.zhihu.com/'
    seed_url = urljoin(base_url, 'explore')
    resp = requests.get(seed_url,
                        headers=headers,
                        proxies=proxies)
    soup = BeautifulSoup(resp.text, 'lxml')
    href_regex = re.compile(r'^/question')
    link_set = set()
    for a_tag in soup.find_all('a', {'href': href_regex}):
        if 'href' in a_tag.attrs:
            href = a_tag.attrs['href']
            full_url = urljoin(base_url, href)
            link_set.add(full_url)
    print('Total %d question pages found.' % len(link_set))


if __name__ == '__main__':
    main()
```
Day66-75/03.存储数据.md
## Storing Data

### Caching and Persistence

### Data Caching

From the previous chapters we already know how to scrape data from a given page and how to save the scraped results. One case we have not considered yet is needing to extract more data from pages that have already been crawled. Re-downloading those pages is tolerable for a small site, but caching the pages instead gives the application a noticeable performance boost.
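As a concrete illustration of that idea, a page cache can be sketched on top of Redis: each page is keyed by the MD5 digest of its URL and stored zlib-compressed, so a later run checks the cache before downloading again. The connection details and the helper names `page_key` and `fetch` below are assumptions for illustration, not code from this repository.

```Python
import hashlib
import pickle
import zlib

import redis
import requests

# Hypothetical connection details - point this at your own Redis server.
client = redis.Redis(host='127.0.0.1', port=6379)


def page_key(url):
    """Key a cached page by the MD5 digest of its URL."""
    return hashlib.md5(url.encode('utf-8')).hexdigest()


def fetch(url):
    """Return the HTML for url, downloading it only on a cache miss."""
    cached = client.get(page_key(url))
    if cached is not None:
        return pickle.loads(zlib.decompress(cached))
    html = requests.get(url).text
    client.set(page_key(url), zlib.compress(pickle.dumps(html)))
    return html
```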
### Using NoSQL

#### Redis

#### Mongo

### Disk File Caching

### Database Caching
Day66-75/code/example02.py
@@ -13,7 +13,7 @@ def main():
        </head>
        <body>
            <h1>Hello, world!</h1>
-           <p>这是一个神奇的网站!</p>
+           <p>这是一个<em>神奇</em>的网站!</p>
            <hr>
            <div>
                <h2>这是一个例子程序</h2>
@@ -44,14 +44,28 @@ def main():
    print(soup.title)
    # JavaScript - document.body.h1
    print(soup.body.h1)
    print(soup.find_all(re.compile(r'^h')))
    print(soup.p)
    print(soup.body.p.text)
    print(soup.body.p.contents)
    for p_child in soup.body.p.children:
        print(p_child)
    print(len([elem for elem in soup.body.children]))
    print(len([elem for elem in soup.body.descendants]))
    print(soup.findAll(re.compile(r'^h[1-6]')))
    # a plain string is matched as an exact tag name (not a regex), so this finds nothing
    print(soup.body.find_all(r'^h'))
    print(soup.body.div.find_all(re.compile(r'^h')))
    print(soup.find_all(re.compile(r'r$')))
    print(soup.find_all('img', {'src': re.compile(r'\./img/\w+.png')}))
    print(soup.find_all(lambda x: len(x.attrs) == 2))
    print(soup.find_all(foo))
    print(soup.find_all('p', {'class': 'foo'}))
    for elem in soup.select('a[href]'):
        print(elem.attrs['href'])


def foo(elem):
    return len(elem.attrs) == 2


if __name__ == '__main__':
    main()
Day66-75/code/example04.py
new file mode 100644
from urllib.parse import urljoin

import re
import requests

from bs4 import BeautifulSoup


def main():
    headers = {'user-agent': 'Baiduspider'}
    proxies = {
        'http': 'http://122.114.31.177:808'
    }
    base_url = 'https://www.zhihu.com/'
    seed_url = urljoin(base_url, 'explore')
    resp = requests.get(seed_url,
                        headers=headers,
                        proxies=proxies)
    soup = BeautifulSoup(resp.text, 'lxml')
    href_regex = re.compile(r'^/question')
    link_set = set()
    for a_tag in soup.find_all('a', {'href': href_regex}):
        if 'href' in a_tag.attrs:
            href = a_tag.attrs['href']
            full_url = urljoin(base_url, href)
            link_set.add(full_url)
    print('Total %d question pages found.' % len(link_set))


if __name__ == '__main__':
    main()
Day66-75/code/example05.py
new file mode 100644
from urllib.error import URLError
from urllib.request import urlopen

import re
import redis
import ssl
import hashlib
import logging
import pickle
import zlib

# Redis offers two persistence options:
# 1. RDB
# 2. AOF


# Decode the page bytes with the given charsets (not every site uses utf-8)
def decode_page(page_bytes, charsets=('utf-8',)):
    page_html = None
    for charset in charsets:
        try:
            page_html = page_bytes.decode(charset)
            break
        except UnicodeDecodeError:
            pass
            # logging.error('[Decode]', err)
    return page_html


# Fetch the HTML of a page (retries the given number of times via recursion)
def get_page_html(seed_url, *, retry_times=3, charsets=('utf-8',)):
    page_html = None
    try:
        if seed_url.startswith('http://') or \
                seed_url.startswith('https://'):
            page_html = decode_page(urlopen(seed_url).read(), charsets)
    except URLError as err:
        logging.error('[URL]', err)
        if retry_times > 0:
            return get_page_html(seed_url, retry_times=retry_times - 1,
                                 charsets=charsets)
    return page_html


# Extract the needed parts from a page (usually links, selected by a regular expression)
def get_matched_parts(page_html, pattern_str, pattern_ignore_case=re.I):
    pattern_regex = re.compile(pattern_str, pattern_ignore_case)
    return pattern_regex.findall(page_html) if page_html else []


# Run the crawler and persist the collected pages
def start_crawl(seed_url, match_pattern, *, max_depth=-1):
    client = redis.Redis(host='120.77.222.217', port=11223, password='1qaz2wsx')
    charsets = ('utf-8', 'gbk', 'gb2312')
    logging.info('[Redis ping]', client.ping())
    url_list = [seed_url]
    visited_url_list = {seed_url: 0}
    while url_list:
        current_url = url_list.pop(0)
        depth = visited_url_list[current_url]
        if depth != max_depth:
            page_html = get_page_html(current_url, charsets=charsets)
            links_list = get_matched_parts(page_html, match_pattern)
            for link in links_list:
                if link not in visited_url_list:
                    visited_url_list[link] = depth + 1
                    page_html = get_page_html(link, charsets=charsets)
                    if page_html:
                        # Cache each page in Redis, keyed by the MD5 digest of its URL,
                        # pickled and zlib-compressed.
                        hasher = hashlib.md5()
                        hasher.update(link.encode('utf-8'))
                        zipped_page = zlib.compress(pickle.dumps(page_html))
                        client.set(hasher.hexdigest(), zipped_page)


def main():
    ssl._create_default_https_context = ssl._create_unverified_context
    start_crawl('http://sports.sohu.com/nba_a.shtml',
                r'<a[^>]+test=a\s[^>]*href=["\'](.*?)["\']',
                max_depth=2)


if __name__ == '__main__':
    main()