Commit 0b0e0582 authored by 胡凯旋's avatar 胡凯旋

init

parents
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
# Translations
*.mo
*.pot
# Django stuff:
*.log
# Sphinx documentation
docs/_build/
# PyBuilder
target/
# Extra
.idea/
.vscode/
.env/
.env2/
.python-version
\ No newline at end of file
# -*- coding:utf-8 -*-
import time
import pprint
import json
from aliyunsdkcore import client
from aliyunsdkcms.request.v20170301 import QueryMetricListRequest, QueryMetricLastRequest
import settings
class ALiYun(object):
    """Base helper for Aliyun SDK access.

    Holds the API credentials (taken from the project settings module) and
    provides small utilities shared by concrete service wrappers.
    """

    def __init__(self):
        # Credentials are configured in settings.py.
        self.key_id = settings.ALIYUN_KEY
        self.key_secret = settings.ALIYUN_KEY_SECRET

    def get_client(self, region):
        """Return an AcsClient bound to *region* using the stored credentials."""
        return client.AcsClient(self.key_id, self.key_secret, region)

    def json_format(self, response):
        """Decode an SDK response (bytes or str JSON) into a Python object."""
        text = response.decode() if isinstance(response, bytes) else response
        return json.loads(text)
class CMS(ALiYun):
    """Aliyun CloudMonitor (CMS) wrapper for querying RDS metrics."""

    def get_rds_metric(self, rds_instance_id, metric, duration=60 * 3,
                       region='cn-qingdao'):
        """Fetch recent datapoints of *metric* for one RDS instance.

        :param rds_instance_id: Aliyun RDS instance ID to query.
        :param metric: CMS metric name, e.g. 'CpuUsage' or 'DiskUsage'.
        :param duration: how far back to query, in seconds (default 3 minutes).
        :param region: Aliyun region for the client (default 'cn-qingdao',
            preserving the previously hard-coded value).
        :return: decoded JSON response dict; datapoints live under the
            'Datapoints' key.
        """
        # Renamed from `client` to avoid shadowing the imported
        # aliyunsdkcore.client module.
        acs_client = self.get_client(region)
        request = QueryMetricListRequest.QueryMetricListRequest()
        request.set_accept_format('json')
        request.set_Project('acs_rds_dashboard')
        request.set_Metric(metric)
        # CMS expects millisecond timestamps.
        end_timestamp = int(time.time()) * 1000
        start_timestamp = end_timestamp - duration * 1000
        request.set_StartTime(start_timestamp)
        request.set_EndTime(end_timestamp)
        request.set_Dimensions(json.dumps({'instanceId': rds_instance_id}))
        result = acs_client.do_action_with_exception(request)
        return self.json_format(result)
def main():
    """Smoke-test: query CPU and disk usage for every configured RDS instance."""
    aliyun_cms = CMS()
    # Iterating the dict directly yields its keys; .keys() was redundant.
    for rds_instance_id in settings.RDS_INSTANCES:
        aliyun_cms.get_rds_metric(rds_instance_id, 'CpuUsage')
        aliyun_cms.get_rds_metric(rds_instance_id, 'DiskUsage')


if __name__ == '__main__':
    main()
# -*- coding:utf-8 -*-
import time
import json
import random
from prometheus_client import start_http_server
from prometheus_client.core import GaugeMetricFamily, REGISTRY
import settings
from aliyun import CMS
class RDSCollector(object):
    """Custom Prometheus collector exposing Aliyun RDS metrics.

    Registered with the client registry; ``collect`` runs on every scrape.
    """

    def collect(self):
        """Yield gauge families for each instance in settings.RDS_INSTANCES.

        Only CPU and memory usage are currently fetched from CMS; the disk,
        IOPS and connection families are created and yielded but left empty.
        """
        cms = CMS()
        families = []
        for instance_id, instance_name in settings.RDS_INSTANCES.items():
            cpu = GaugeMetricFamily(
                'rds_cpu_usage', 'cpu usage of this rds instance',
                labels=['rds_name'])
            disk = GaugeMetricFamily(
                'rds_disk_usage', 'disk usage of this rds instance',
                labels=['rds_name'])
            iops = GaugeMetricFamily(
                'rds_iops_usage', 'iops usage', labels=['rds_name'])
            conn = GaugeMetricFamily(
                'rds_connection_usage', 'connection usage', labels=['rds_name'])
            mem = GaugeMetricFamily(
                'rds_memory_usage', 'memory usage', labels=['rds_name'])

            # Datapoints arrive ordered by time ascending; CMS timestamps are
            # in milliseconds while Prometheus wants seconds.
            for point in cms.get_rds_metric(instance_id, 'CpuUsage')['Datapoints']:
                cpu.add_metric([instance_name], point['Average'],
                               point['timestamp'] / 1000)
            for point in cms.get_rds_metric(instance_id, 'MemoryUsage')['Datapoints']:
                mem.add_metric([instance_name], point['Average'],
                               point['timestamp'] / 1000)

            families += [cpu, disk, iops, conn, mem]

        # All instances are queried before anything is emitted, matching the
        # original accumulate-then-yield behavior.
        for family in families:
            yield family
def main():
    # Register the collector so every scrape of /metrics triggers
    # RDSCollector.collect(), then serve the exporter on port 7086.
    REGISTRY.register(RDSCollector())
    start_http_server(7086)
    # The HTTP server runs in a background thread; keep the main thread
    # alive in the foreground.
    while True:
        time.sleep(1)
if __name__ == '__main__':
    main()
\ No newline at end of file
# -*- coding:utf-8 -*-

# Aliyun API credentials (fill in before running).
ALIYUN_KEY = ''
ALIYUN_KEY_SECRET = ''

# Mapping of RDS instance IDs to monitor -> human-readable instance
# description (the description is used as the 'rds_name' metric label).
RDS_INSTANCES = {
}
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment