Structuring and Saving Data
Published: 2019-06-28


1. Structuring the data:

  • A dictionary with the details of a single article: news
  • A list of all the articles on one list page: newsls.append(news)
  • A list of all the articles from every list page: newstotal.extend(newsls) (a sketch of how these nest follows this list)
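A minimal sketch of how the three levels nest; the field values here are placeholders, not real data:

news = {'url': '...', 'title': '...', 'dt': None, 'source': '...', 'click': 0}  # one article
newsls = []               # every article on one list page
newsls.append(news)
newstotal = []            # every article across all list pages
newstotal.extend(newsls)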

(1) A dictionary with the details of a single article: news

import requests
from bs4 import BeautifulSoup
from datetime import datetime
import re
import pandas
import sqlite3

url = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
res = requests.get(url)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')

def getclick(url):
    # Pull the article id out of the URL, e.g. ..._1017/8338.html -> 8338.
    m = re.search(r'_(.*)\.html', url)
    newsid = m.group(1)[5:]  # drop the '1017/' date prefix
    clickurl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsid)
    resc = requests.get(clickurl).text
    # The count API returns a JavaScript snippet; strip everything around the number.
    r = re.search(r'hits(.*)', resc).group(1)
    click = r.lstrip("').html('").rstrip("');")
    return int(click)

print(getclick('http://news.gzcc.cn/html/2017/xiaoyuanxinwen_1017/8338.html'))

Result: (output screenshot omitted)
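Judging from the lstrip/rstrip calls in getclick, the count API presumably returns a small JavaScript snippet along the lines of $('#hits').html('5600'); — that response format is an assumption, not verified against the live API. Because str.lstrip and str.rstrip strip character sets rather than fixed prefixes and suffixes, a regex that captures the digits directly is a less fragile way to pull the number out:

import re
# Assumed response format (illustrative sample, not a live API response):
resc = "$('#hits').html('5600');"
click = int(re.search(r"\.html\('(\d+)'\)", resc).group(1))
print(click)  # 5600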

(2) A list of all the articles on one list page: newsls.append(news)

import requests
from bs4 import BeautifulSoup
from datetime import datetime
import re
import pandas
import sqlite3

url = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
res = requests.get(url)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')

def getclick(url):
    m = re.search(r'_(.*)\.html', url)
    newsid = m.group(1)[5:]
    clickurl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsid)
    resc = requests.get(clickurl).text
    r = re.search(r'hits(.*)', resc).group(1)
    click = r.lstrip("').html('").rstrip("');")
    return int(click)

def getdetail(url):
    resd = requests.get(url)
    resd.encoding = 'utf-8'
    soupd = BeautifulSoup(resd.text, 'html.parser')
    news = {}
    news['url'] = url
    news['title'] = soupd.select('.show-title')[0].text
    info = soupd.select('.show-info')[0].text
    # lstrip removes the leading '发布时间:' characters; the next 19 chars are the timestamp.
    news['dt'] = datetime.strptime(info.lstrip('发布时间:')[0:19], '%Y-%m-%d %H:%M:%S')
    news['source'] = re.search('来源:(.*)点击', info).group(1).strip()
    # news['content'] = soupd.select('.show-content')[0].text.strip()
    news['click'] = getclick(url)
    return news

print(getdetail('http://news.gzcc.cn/html/2017/xiaoyuanxinwen_1017/8338.html'))

Result: (output screenshot omitted)
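To sanity-check the field parsing in getdetail without hitting the network, the same strptime and regex logic can be run on a sample string; the sample below is an assumption about what the page's .show-info text looks like:

from datetime import datetime
import re
# Illustrative .show-info text (an assumption about the page layout):
info = '发布时间:2017-10-17 10:06:24  作者:  来源:学校办公室  点击: 次'
dt = datetime.strptime(info.lstrip('发布时间:')[0:19], '%Y-%m-%d %H:%M:%S')
source = re.search('来源:(.*)点击', info).group(1).strip()
print(dt, source)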

(3) A list of all the articles from every list page: newstotal.extend(newsls)

import requests
from bs4 import BeautifulSoup
from datetime import datetime
import re
import pandas
import sqlite3

url = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
res = requests.get(url)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')

def getclick(url):
    m = re.search(r'_(.*)\.html', url)
    newsid = m.group(1)[5:]
    clickurl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsid)
    resc = requests.get(clickurl).text
    r = re.search(r'hits(.*)', resc).group(1)
    click = r.lstrip("').html('").rstrip("');")
    return int(click)

def getdetail(url):
    resd = requests.get(url)
    resd.encoding = 'utf-8'
    soupd = BeautifulSoup(resd.text, 'html.parser')
    news = {}
    news['url'] = url
    news['title'] = soupd.select('.show-title')[0].text
    info = soupd.select('.show-info')[0].text
    news['dt'] = datetime.strptime(info.lstrip('发布时间:')[0:19], '%Y-%m-%d %H:%M:%S')
    news['source'] = re.search('来源:(.*)点击', info).group(1).strip()
    # news['content'] = soupd.select('.show-content')[0].text.strip()
    news['click'] = getclick(url)
    return news

def onepage(pageurl):
    res = requests.get(pageurl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    newsls = []
    # Only <li> elements that carry a .news-list-title are article entries.
    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            newsls.append(getdetail(news.select('a')[0]['href']))
    return newsls

print(onepage('http://news.gzcc.cn/html/xiaoyuanxinwen/'))

Result: (output screenshot omitted)
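The script above only crawls the index page. The full script in the next section derives the total page count from the '.a1' element (which shows the article total) and then loops from page 2 onward, although the demo stops at page 2. A sketch of crawling every list page, assuming 10 articles per page as the script's n // 10 + 1 arithmetic implies:

n = int(soup.select('.a1')[0].text.rstrip('条'))  # e.g. '9543条' -> 9543 articles in total
pages = n // 10 + 1                               # 10 articles per list page
for i in range(2, pages + 1):                     # page 1 is the index page itself
    listurl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    newstotal.extend(onepage(listurl))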

2. Converting to a pandas DataFrame

 

import requests
from bs4 import BeautifulSoup
from datetime import datetime
import re
import pandas
import sqlite3

url = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
res = requests.get(url)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')

def getclick(url):
    m = re.search(r'_(.*)\.html', url)
    newsid = m.group(1)[5:]
    clickurl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsid)
    resc = requests.get(clickurl).text
    r = re.search(r'hits(.*)', resc).group(1)
    click = r.lstrip("').html('").rstrip("');")
    return int(click)

def getdetail(url):
    resd = requests.get(url)
    resd.encoding = 'utf-8'
    soupd = BeautifulSoup(resd.text, 'html.parser')
    news = {}
    news['url'] = url
    news['title'] = soupd.select('.show-title')[0].text
    info = soupd.select('.show-info')[0].text
    news['dt'] = datetime.strptime(info.lstrip('发布时间:')[0:19], '%Y-%m-%d %H:%M:%S')
    news['source'] = re.search('来源:(.*)点击', info).group(1).strip()
    # news['content'] = soupd.select('.show-content')[0].text.strip()
    news['click'] = getclick(url)
    return news

def onepage(pageurl):
    res = requests.get(pageurl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    newsls = []
    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            newsls.append(getdetail(news.select('a')[0]['href']))
    return newsls

newstotal = []
gzccurl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
newstotal.extend(onepage(gzccurl))

res = requests.get(gzccurl)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')
n = int(soup.select('.a1')[0].text.rstrip('条'))  # total number of articles
pages = n // 10 + 1                               # 10 articles per list page

for i in range(2, 3):  # demo: only page 2; use range(2, pages + 1) for all pages
    listurl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    newstotal.extend(onepage(listurl))
# print(len(newstotal))   # 20

df = pandas.DataFrame(newstotal)
print(df.head())

Result: (output screenshot omitted)
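Once the records are in a DataFrame, the usual pandas operations apply. A few illustrative queries (the source value below is a placeholder, not taken from real output):

print(df.sort_values('click', ascending=False).head())  # most-clicked articles first
print(df[df['source'] == '学校综合办'])                  # filter by source (placeholder value)
print(df['click'].describe())                           # summary statistics of click counts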

3. Saving the DataFrame to Excel

 

import requests
from bs4 import BeautifulSoup
from datetime import datetime
import re
import pandas
import sqlite3

url = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
res = requests.get(url)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')

def getclick(url):
    m = re.search(r'_(.*)\.html', url)
    newsid = m.group(1)[5:]
    clickurl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsid)
    resc = requests.get(clickurl).text
    r = re.search(r'hits(.*)', resc).group(1)
    click = r.lstrip("').html('").rstrip("');")
    return int(click)

def getdetail(url):
    resd = requests.get(url)
    resd.encoding = 'utf-8'
    soupd = BeautifulSoup(resd.text, 'html.parser')
    news = {}
    news['url'] = url
    news['title'] = soupd.select('.show-title')[0].text
    info = soupd.select('.show-info')[0].text
    news['dt'] = datetime.strptime(info.lstrip('发布时间:')[0:19], '%Y-%m-%d %H:%M:%S')
    news['source'] = re.search('来源:(.*)点击', info).group(1).strip()
    # news['content'] = soupd.select('.show-content')[0].text.strip()
    news['click'] = getclick(url)
    return news

def onepage(pageurl):
    res = requests.get(pageurl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    newsls = []
    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            newsls.append(getdetail(news.select('a')[0]['href']))
    return newsls

newstotal = []
gzccurl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
newstotal.extend(onepage(gzccurl))

res = requests.get(gzccurl)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')
n = int(soup.select('.a1')[0].text.rstrip('条'))
pages = n // 10 + 1

for i in range(2, 3):
    listurl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    newstotal.extend(onepage(listurl))
# print(len(newstotal))   # 20

df = pandas.DataFrame(newstotal)
print(df.head())
print(df['title'])
print(df[df.click > 5000])
df.to_excel('gzccnews.xlsx')  # write the whole DataFrame to a spreadsheet

Result: (output screenshot omitted)
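Note that to_excel delegates to an Excel writer library, so one must be installed — openpyxl for .xlsx files (depending on the pandas version, xlwt was also accepted for .xls). Passing index=False keeps the DataFrame's integer index out of the spreadsheet:

# pip install openpyxl  (required for writing .xlsx with pandas)
df.to_excel('gzccnews.xlsx', index=False)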

4. Saving the DataFrame to a sqlite3 database

import requests
from bs4 import BeautifulSoup
from datetime import datetime
import re
import pandas
import sqlite3

url = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
res = requests.get(url)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')

def getclick(url):
    m = re.search(r'_(.*)\.html', url)
    newsid = m.group(1)[5:]
    clickurl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsid)
    resc = requests.get(clickurl).text
    r = re.search(r'hits(.*)', resc).group(1)
    click = r.lstrip("').html('").rstrip("');")
    return int(click)

def getdetail(url):
    resd = requests.get(url)
    resd.encoding = 'utf-8'
    soupd = BeautifulSoup(resd.text, 'html.parser')
    news = {}
    news['url'] = url
    news['title'] = soupd.select('.show-title')[0].text
    info = soupd.select('.show-info')[0].text
    news['dt'] = datetime.strptime(info.lstrip('发布时间:')[0:19], '%Y-%m-%d %H:%M:%S')
    news['source'] = re.search('来源:(.*)点击', info).group(1).strip()
    # news['content'] = soupd.select('.show-content')[0].text.strip()
    news['click'] = getclick(url)
    return news

def onepage(pageurl):
    res = requests.get(pageurl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    newsls = []
    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            newsls.append(getdetail(news.select('a')[0]['href']))
    return newsls

newstotal = []
gzccurl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
newstotal.extend(onepage(gzccurl))

res = requests.get(gzccurl)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')
n = int(soup.select('.a1')[0].text.rstrip('条'))
pages = n // 10 + 1

for i in range(2, 3):
    listurl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    newstotal.extend(onepage(listurl))
# print(len(newstotal))   # 20

df = pandas.DataFrame(newstotal)
print(df.head())
print(df['title'])
print(df[df.click > 5000])

# Write the DataFrame into a sqlite3 database as table 'news_table'.
with sqlite3.connect('gzccnews_db.sqlite') as db:
    df.to_sql('news_table', con=db)

Result: (output screenshot omitted)
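Two follow-up notes on to_sql: by default it raises an error if news_table already exists, so pass if_exists='replace' when re-running the script; and the write can be verified by reading the table back with read_sql_query:

with sqlite3.connect('gzccnews_db.sqlite') as db:
    df.to_sql('news_table', con=db, if_exists='replace')  # overwrite on re-runs
    df2 = pandas.read_sql_query('SELECT * FROM news_table', con=db)
print(df2.head())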

 

Reposted from: https://www.cnblogs.com/fatmanwu/p/7695663.html
