共计 6630 个字符,预计需要花费 17 分钟才能阅读完成。
通过定时执行 Python 脚本,可以实现定期批量访问网站,如果发现网站打不开,第一时间发邮件到管理员邮箱进行预警。
这里用的是 Python3.5
需要用到的模块:
smtplib: 发邮件需要用到(Python 标准库,无需额外安装)
pycurl: 访问网站时会需要用到(第三方库,需要自行安装)
linecache: 在读取 txt 网站清单时需要用到(Python 标准库,无需额外安装)
具体思路:
python 程序从 txt 里面批量读取到网站的信息, 通过 Curl.py 模拟浏览器去访问网站, 并且把访问的结果写入到以自己的网站名称 - 日期.txt 格式的文件中记录; 有几种情况:
1、如果发现打不开了, 直接发邮件提示网站已经打不开
2、发现可以打开, 读取文件中上一次访问的情况 (读取 txt 文件最后一行),
1) 如果发现上一次是打不开的, 发邮件提醒网站已经恢复了
2) 如果发现上一次是打得开的 (200 的返回码), 只是记录网站访问的日志就可以了
总共 4 个文件,
Email.py 是邮件类, 主要用来发邮件的时候调用, 这里需要按照你的情况改成你的邮箱 (msg['From']), 邮箱服务器地址 (SMTP 地址), 和你的邮箱密码 (SMTP.login)
Email.py
#!/usr/bin/python
#-*- coding:utf-8 -*-
import sys
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
class Email_send(object):
    """Send an HTML alert e-mail via SMTP.

    Attributes:
        msgTo:   comma-separated recipient address string.
        data2:   HTML body of the message.
        Subject: subject line of the message.
    """

    def __init__(self, msgTo, data2, Subject):
        self.msgTo = msgTo
        self.data2 = data2
        self.Subject = Subject

    def sendEmail(self):
        """Build the MIME message and deliver it.

        Returns a success string on delivery; on failure prints the
        exception and returns None (best-effort, callers only log it).
        """
        msg = MIMEMultipart()
        msg['Subject'] = self.Subject
        msg['From'] = 'xxxx@xxxx.com.cn'  # change to your own sender address
        msg['To'] = self.msgTo
        html_att = MIMEText(self.data2, 'html', 'utf-8')
        msg.attach(html_att)
        smtp = None
        try:
            smtp = smtplib.SMTP('smtp.xxxx.com', 25)  # your SMTP server
            smtp.login(msg['From'], 'xxxx')  # change to your own password
            smtp.sendmail(msg['From'], msg['To'].split(','), msg.as_string())
            return('邮件发送成功')
        except Exception as e:
            print('--------------sss------', e)
        finally:
            # fix: the SMTP connection was previously never closed
            if smtp is not None:
                try:
                    smtp.quit()
                except Exception:
                    pass

    def curl(self, url=None):
        """Deprecated duplicate of Curl.Curl_site().

        The original referenced an undefined global ``url`` and always
        raised NameError; it now takes the URL as a parameter (default
        None keeps the old zero-argument call signature importable).
        """
        import pycurl
        if url is None:
            raise ValueError('url is required')
        c = pycurl.Curl()
        c.setopt(c.URL, url)
        c.setopt(c.VERBOSE, 1)
        c.setopt(c.ENCODING, "gzip")
        # mimic Firefox so naive bot filters do not reject the probe
        c.setopt(c.USERAGENT, "Mozilla/5.0 (Windows NT 6.1; rv:35.0) Gecko/20100101 Firefox/35.0")
        return c
Curl.py 主要用来执行模拟浏览器访问网站并返回结果的文件
#!/usr/bin/python
#-*- coding:utf-8 -*-
import sys
import pycurl
class Curl(object):
    """Thin pycurl wrapper that prepares a browser-like request handle."""

    def __init__(self, url):
        # url: site address to probe, e.g. "www.linuxidc.com"
        self.url = url

    def Curl_site(self):
        """Return a configured pycurl handle.

        The caller runs perform() / getinfo() on it and is responsible
        for close().
        """
        c = pycurl.Curl()
        c.setopt(c.URL, self.url)
        c.setopt(c.VERBOSE, 1)
        # NOTE(review): ENCODING is deprecated in newer pycurl releases
        # in favour of ACCEPT_ENCODING — confirm against your version.
        c.setopt(c.ENCODING, "gzip")
        # mimic Firefox so naive bot filters do not reject the probe
        c.setopt(c.USERAGENT, "Mozilla/5.0 (Windows NT 6.1; rv:35.0) Gecko/20100101 Firefox/35.0")
        # fix: without timeouts one hung site stalls the whole monitor run
        c.setopt(c.CONNECTTIMEOUT, 10)
        c.setopt(c.TIMEOUT, 30)
        return c
site_moniter.py 这个文件为主程序, 主要执行调用上面的函数, 读取 txt 文件中的网站清单, 如果网站打不开就发邮件出来告警
需要注意:1、把 xxxx@xxxx.com 改成你自己的邮箱,
2、把文件路径改成自己的真实路径
#!/usr/bin/python
#-*- coding:utf-8 -*-
import pycurl
import os
import sys
import linecache
import time #引入时间模块,用来获取系统当前时间
#from ceshi import Student
from Email import Email_send
from Curl import Curl
#bart = Student('mafei',59)
#bart.print_score()
def script(urls, type):
    """Probe each site in *urls*, log the result, and e-mail alerts.

    urls: iterable of "name---address" strings.
    type: unused legacy argument, kept so existing callers keep working.

    For every site: perform the request, append a
    "name----url-----HH:MM:SS-------code" line to a per-day log file,
    mail a "down" alert on a bad status code, and mail a "recovered"
    alert when a good probe follows a bad one.
    """
    msgTo = 'xxxx@xxxx.com'  # change to your own alert mailbox
    now_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
    # Status codes (as strings) treated as "site down".  pycurl reports
    # HTTP_CODE 0 when the connection itself failed.
    down_codes = ('0', '400', '500', '404')
    for url_split in urls:
        url_1 = url_split.split('---')
        url = url_1[1]
        recovery_title = "监控通知 ----%s url:%s" % (url_1[0], url) + "在" + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time())) + "已经恢复"
        down_title = "监控通知 ----%s url:%s" % (url_1[0], url) + "在" + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time())) + "无法打开"
        # Build a browser-like pycurl handle and run the request.
        c = Curl(url).Curl_site()
        try:
            c.perform()
        except Exception as e:
            print('-------- 错误信息:--------', e)
        # Safe even after a failed perform(): HTTP_CODE is then 0.
        code = str(c.getinfo(c.HTTP_CODE))
        c.close()  # fix: the pycurl handle was previously never released
        filename = '%s-%s.txt' % (url_1[0], time.strftime("%Y-%m-%d", time.localtime(time.time())))
        if code in down_codes:
            # Site is down: alert immediately.
            data3 = '网站:%s 无法打开 %s' % (url_1[0], url)
            stat3 = Email_send(msgTo, data3, down_title)
            resole = stat3.sendEmail()
            print(resole)
            print(data3 + '邮件已经发送')
        else:
            print('网站可以正常打开')
            if os.path.exists(filename):
                # Read the log's last line = most recently recorded status.
                # (Replaces linecache, which served stale cached lines on
                # repeated runs; also the old ``file.close`` was missing
                # its parentheses, leaking the handle.)
                with open(filename, 'r', encoding='utf-8') as logfile:
                    lines = logfile.readlines()
                data = lines[-1] if lines else ''
                if data == '':
                    print('这是' + data + '为空的数据')
                else:
                    print('其他信息 %s' % (data))
                    explode = data.split('----')
                    # fix: strip the newline before comparing; the old
                    # mixed "'0\n'" vs "'500'" literals never matched
                    # consistently (the last log line has no newline).
                    last_code = explode[3].strip() if len(explode) > 3 else ''
                    if last_code in down_codes:
                        # Previous probe failed, this one succeeded: recovered.
                        data3 = '网站:%s 在 %s 已经恢复 %s' % (url_1[0], now_time, url)
                        stat3 = Email_send(msgTo, data3, recovery_title)
                        resole = stat3.sendEmail()
                        print(resole)
                        print(data3 + '邮件已经发送')
                    else:
                        print('最后一次记录为其他值:%s' % (explode[3]) + '-----')
            else:
                print('文件不存在')
        # Append this probe's result to the per-day log.
        data2 = '\n' + url_1[0] + '----' + url + '-----' + time.strftime("%H:%M:%S", time.localtime(time.time())) + '-------' + code
        print('data2 数据写入成功:' + data2)
        with open(filename, 'a', encoding='utf-8') as logfile:
            logfile.write(data2)
if __name__ == "__main__":
    # Read the site list (one "name---address" entry per line) and run
    # the monitor.  Change the path below to your real list location.
    data2 = []
    # fix: the old code shadowed the ``file`` builtin and never closed it
    with open(r'D:\python\site_moniter\zhongxin.txt') as site_file:
        for line2 in site_file:
            print(line2)
            # fix: line2[0:-1] chopped the last character of a final line
            # that had no trailing newline; rstrip('\n') is safe.
            data2.append(line2.rstrip('\n'))
    print(data2)
    title = "监控通知 - 中心" + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
    script(data2, title)
本文永久更新链接地址 :http://www.linuxidc.com/Linux/2016-08/134589.htm