您好,登录后才能下订单哦!
本篇内容介绍了“怎么用python抓取百度贴吧内容”的有关知识,在实际案例的操作过程中,不少人都会遇到这样的困境,接下来就让小编带领大家学习一下如何处理这些情况吧!希望大家仔细阅读,能够学有所成!
# -*- coding: utf-8
# Python 2 standard-library dependencies for the Tieba crawler below.
import urllib2
import urllib
import re,os
import time
import sys
# Python 2 hack: force the process-wide default encoding to UTF-8 so that
# implicit str/unicode concatenation of Chinese text does not raise
# UnicodeDecodeError.  sys.setdefaultencoding is removed by site.py at
# startup, so sys must be reload()ed to get it back.
reload(sys)
sys.setdefaultencoding('utf-8')
class Tiebar:
#初始化数据
def __init__(self,base_url,see_lz):
self.base_url = base_url
self.see_lz = '?see_lz=' + see_lz
self.page = 1
self.user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
self.headers = { 'User-Agent' : self.user_agent }
self.tool = Tool()
self.out_put_file = 'd:/python/test/out.txt'
#获取页面内容的方法
def get_cotent(self,page):
try:
url = self.base_url + self.see_lz + '&pn=' + str(page)
request = urllib2.Request(url,headers=self.headers)
response = urllib2.urlopen(request)
act_url = response.geturl()
print 'init url=',url,'act url=',act_url
if url == act_url:
content = response.read()
return content
else:
return None
except urllib2.URLError, e:
if hasattr(e,"reason"):
print u"连接贴吧页面失败,错误原因",e.reason
return None
#获取帖子主题
def get_titile(self):
content = self.get_cotent(1)
pattern = re.compile('<h4 .*?>(.*?)</h4>')
result = re.search(pattern,content)
if result:
return result.group(1).strip()
else:
return None
#获取帖子的页数
def get_page_num(self):
content = self.get_cotent(1)
pattern = re.compile('<li class="l_reply_num.*?.*?<span.*?>(.*?)',re.S)
result = re.search(pattern,content)
if result:
return result.group(1).strip()
else:
return None
#获取帖子内容
def get_tiebar(self,page):
content = self.get_cotent(page).decode('utf-8')
str = ''
if not content:
print "抓取完毕"
return None
patt_content = re.compile('<a data-field=.*?class="p_author_name j_user_card".*?>(.*?)</a>.*?'
+ '<div id=".*?" class="d_post_content j_d_post_content "> '
+ '(.*?)',re.S)
msg = re.findall(patt_content,content)
for item in msg:
str = str + '\n作者-----' + item[0] + '\n' + '帖子内容-----' + item[1].strip() + '\n'
str = self.tool.replace(str)
# print u'作者',item[0],u'帖子内容:',item[1]
return str
#写文件
def writeStr2File(self,out_put_file,str1,append = 'a'):
# 去掉文件,保留路径。比如 'a/b/c/d.txt' 经过下面代码会变成 'a/b/c'
subPath = out_put_file[:out_put_file.rfind('/')]
# 如果给定的路径中,文件夹不存在,则创建
if not os.path.exists(subPath):
os.makedirs(subPath)
# 打开文件并将 str 内容写入给定的文件
with open(out_put_file, append) as f:
f.write(str1.strip()+'\n')
def start_crawl(self):
page_num = self.get_page_num()
if page_num == None:
print "url已失效,请重试!"
return
print u"该帖子共有" + str(page_num) + u"页数据"
for i in range(1,int(page_num)+1):
print u"正在写入第" + str(i) + u"页数据..."
content = "正在写入第" + str(i) + u"页数据------------------------------------\n" + self.get_tiebar(i)
self.writeStr2File(self.out_put_file,content)
class Tool:
    """Small HTML cleaner: removes <img> and <a> tags and turns runs of
    <br> tags into a single newline."""
    # Strip <img ...> tags entirely.
    patt_img = re.compile(r'<img.*?>')
    # Strip <a ...> opening and </a> closing tags (the link text is kept).
    patt_herf = re.compile(r'<a.*?>|</a>')
    # Collapse 1-3 consecutive <br> tags into one newline.
    # Bug fix: the original '<br>{1,3}' applied the quantifier to the '>'
    # character only, so '<br><br>' produced two newlines; the
    # non-capturing group makes the quantifier span whole tags as intended.
    patt_br = re.compile(r'(?:<br>){1,3}')

    def replace(self, content):
        """Return `content` with the three patterns above applied, stripped
        of leading/trailing whitespace."""
        content = re.sub(self.patt_img, "", content)
        content = re.sub(self.patt_herf, "", content)
        content = re.sub(self.patt_br, "\n", content)
        return content.strip()
# Entry point: crawl thread 3138733512, original-poster posts only ('1').
# Guarded so importing this module no longer triggers a network crawl;
# running it as a script behaves exactly as before.
if __name__ == '__main__':
    tiebar = Tiebar('http://tieba.baidu.com/p/3138733512', '1')
    # title = tiebar.get_titile()
    # page_num = tiebar.get_page_num()
    # print title, page_num
    tiebar.start_crawl()
“怎么用python抓取百度贴吧内容”的内容就介绍到这里了,感谢大家的阅读。如果想了解更多行业相关的知识可以关注亿速云网站,小编将为大家输出更多高质量的实用文章!
免责声明:本站发布的内容(图片、视频和文字)以原创、转载和分享为主,文章观点不代表本网站立场,如果涉及侵权请联系站长邮箱:is@yisu.com进行举报,并提供相关证据,一经查实,将立刻删除涉嫌侵权内容。