# Python crawler for the current article records (revised version)

#encoding:UTF-8
# Test crawler example:
# scrape page content and save it to the database,
# download images and upload them to Qiniu cloud storage
#!/usr/bin/python3  (NOTE: a shebang only takes effect on the very first line of the file)
# encoding:utf-8
'''
Created on 2020-04-21 08:20
@author: king
res = requests.get(url,params=data,headers = headers)
res = requests.post(url, data=json_data, headers = headers)
'''
import requests
import sys,os
import time
from bs4 import BeautifulSoup
from login import models
from login import qintupin
import hashlib

class csdn(object):
    """Scrape article listings from a CSDN blog.

    Workflow: fetch the blog index, visit every article page, extract the
    title / date / body / read-count, and persist each article as a
    ``models.fcomplex`` row.  Article images can be mirrored locally and
    uploaded to Qiniu cloud storage via ``qintupin.uploadqiniu``.

    Attributes:
        href: the URL currently being processed (article page or image URL);
              mutated as the crawler walks the article list.
    """

    def __init__(self, href):
        super(csdn, self).__init__()
        self.href = href

    def gethtml(self, helf):
        """Fetch *helf* and return its body decoded as UTF-8, or "" on failure.

        BUGFIX: the original decoded ``response.content`` into a str and then
        accessed ``.text`` on that str — an AttributeError swallowed by a bare
        ``except:`` — so the method returned "" even on success.  The bare
        except is narrowed to request/decoding errors.
        """
        headers = {
            # CSDN rejects requests without a browser-like User-Agent.
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'
        }
        try:
            response = requests.get(helf, headers=headers, timeout=10)
            if response.status_code == 200:
                return response.content.decode("utf-8")
            return ""
        except (requests.RequestException, UnicodeDecodeError):
            return ""

    def downimgs(self):
        """Download the image at ``self.href`` to disk, then upload it to Qiniu.

        The local filename is the last path segment of the URL.  BUGFIX: the
        original ``os.path.join(base, "/images/")`` discarded *base* because
        the second component is absolute, so the printed path was wrong.
        """
        filename = self.href.split("/")[-1]
        filepath = os.path.join("D:/python/k5003/login/images", filename)
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"
        }
        response = requests.get(self.href, headers=headers, timeout=10)
        print(filepath)
        # Save the image locally, then hand it to the Qiniu uploader.
        with open(filepath, "wb") as f:
            f.write(response.content)
        time.sleep(2)  # brief pause before the upload (crude rate limiting)
        tupian = qintupin.uploadqiniu("1222252", "", "")
        tupian.token = tupian.upload_token()
        tupian.filepath = filepath
        tupian.filename = filename
        tupian.update_file()

    def get_content(self):
        """Crawl the blog index and store every article via ``models.fcomplex``.

        Returns an (always empty) list on success for interface compatibility
        with the original implementation, or ``None`` when the index page
        could not be fetched.
        """
        index_url = "https://blog.csdn.net/qqjjj"
        html = self.gethtml(index_url)
        if not html:
            return None  # original fell through an else/pass -> None

        soup = BeautifulSoup(html, "lxml")
        boxes = soup.find('div', class_="article-list").find_all(class_="article-item-box")
        data = []
        for index, box in enumerate(boxes, start=1):
            href = box.find("a").attrs["href"]
            aboutstr = box.find("p", class_="content").text
            print(aboutstr)
            self.href = href
            article_html = self.gethtml(href)
            if not article_html:
                continue  # robustness: skip pages that failed to download
            soup2 = BeautifulSoup(article_html, "html.parser")
            title = soup2.find('h1', id="articleContentId")
            datatime = soup2.find('span', class_="time")
            content = soup2.find('div', id="article_content")
            clickno = soup2.find('span', class_="read-count")
            # Collect image URLs; keep the last one (query string stripped)
            # as the article's cover image, matching the original behavior.
            imgurl = ""
            for img in content.find_all("img"):
                imgurl = img.attrs["src"].split("?")[0]
                # self.href = imgurl
                # self.downimgs()
                print(imgurl)
            markstr = str(content)  # original applied a no-op replace() here
            time.sleep(2)  # be polite to the server between article fetches
            # BUGFIX: build a fresh digest per article — the original reused
            # one md5 object, so each stored hash covered ALL hrefs so far.
            kmd5 = hashlib.md5(href.encode('utf-8')).hexdigest()
            # BUGFIX: skip already-stored articles; the original computed
            # this queryset but never used the result.
            if models.fcomplex.objects.filter(f_md5url=kmd5).exists():
                continue
            fcomplex = models.fcomplex(
                f_id=index,
                f_md5url=kmd5,
                f_name=str(title.text),
                f_createname="king",
                f_clickno=clickno.text,
                f_addtime=datatime.text,
                f_key=str(title.text),
                f_about=str(aboutstr),
                f_maxurl=str(imgurl),
                f_remark=markstr,
            )
            fcomplex.save()
        return data