Skip to content
Projects
Groups
Snippets
Help
Loading...
Sign in
Toggle navigation
G
gm-mysql-exporter
Project
Project
Details
Activity
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
system
gm-mysql-exporter
Commits
f9e61558
Commit
f9e61558
authored
Oct 19, 2018
by
胡凯旋
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
batch get aliyun api
parent
0b0e0582
Hide whitespace changes
Inline
Side-by-side
Showing
2 changed files
with
39 additions
and
47 deletions
+39
-47
aliyun.py
aliyun.py
+6
-4
main.py
main.py
+33
-43
No files found.
aliyun.py
View file @
f9e61558
...
...
@@ -24,11 +24,13 @@ class ALiYun(object):
class
CMS
(
ALiYun
):
def
get_rds_metric
(
self
,
rds_instance_id
,
metric
,
duration
=
60
*
3
):
def
get_rds_metric
(
self
,
rds_instance_id
s
,
metric
,
duration
=
60
*
3
):
"""
获取实例ID为rds_instance_id的metric监控项
获取实例ID在rds_instance_id列表中的metric监控项
https://yq.aliyun.com/ask/426959?spm=a2c4e.11154873.tagmain.6.4c8d671b5KwueI （一次最多获取十个实例的监控项 — the API returns metrics for at most 10 instances per request）
:param duration: 过去多少时间内的metrics, 单位s
"""
assert
len
(
rds_instance_ids
)
<=
10
,
'一次查询的rds_instace_ids不能超过十个'
client
=
self
.
get_client
(
'cn-qingdao'
)
request
=
QueryMetricListRequest
.
QueryMetricListRequest
()
request
.
set_accept_format
(
'json'
)
...
...
@@ -38,9 +40,9 @@ class CMS(ALiYun):
start_timestamp
=
end_timestamp
-
duration
*
1000
request
.
set_StartTime
(
start_timestamp
)
request
.
set_EndTime
(
end_timestamp
)
dimensions
=
{
dimensions
=
[
{
'instanceId'
:
rds_instance_id
}
}
for
rds_instance_id
in
rds_instance_ids
]
request
.
set_Dimensions
(
json
.
dumps
(
dimensions
))
# request.set_Period('60')
result
=
client
.
do_action_with_exception
(
request
)
...
...
main.py
View file @
f9e61558
...
...
@@ -2,6 +2,7 @@
import
time
import
json
import
random
import
math
from
prometheus_client
import
start_http_server
from
prometheus_client.core
import
GaugeMetricFamily
,
REGISTRY
...
...
@@ -9,53 +10,42 @@ from prometheus_client.core import GaugeMetricFamily, REGISTRY
import
settings
from
aliyun
import
CMS
aliyun_cms
=
CMS
()
rds_instances
=
settings
.
RDS_INSTANCES
class
RDSCollector
(
object
):
def
collect
(
self
):
aliyun_cms
=
CMS
()
# instances = ['rds-0', 'rds-1', 'rds-2']
rds_instances
=
settings
.
RDS_INSTANCES
metrics
=
[]
for
rds_id
,
rds_name
in
rds_instances
.
items
():
cpu_usage_metric
=
GaugeMetricFamily
(
'rds_cpu_usage'
,
'cpu usage of this rds instance'
,
labels
=
[
'rds_name'
])
disk_usage_metric
=
GaugeMetricFamily
(
'rds_disk_usage'
,
'disk usage of this rds instance'
,
labels
=
[
'rds_name'
])
iops_usage_metric
=
GaugeMetricFamily
(
'rds_iops_usage'
,
'iops usage'
,
labels
=
[
'rds_name'
])
connection_usage_metric
=
GaugeMetricFamily
(
'rds_connection_usage'
,
'connection usage'
,
labels
=
[
'rds_name'
])
# 'data_delay': ,
memory_usage_metric
=
GaugeMetricFamily
(
'rds_memory_usage'
,
'memory usage'
,
labels
=
[
'rds_name'
])
# 'mysql_network_in': ,
# 'mysql_network_out': ,
cpu_datapoints
=
aliyun_cms
.
get_rds_metric
(
rds_id
,
'CpuUsage'
)[
'Datapoints'
]
# disk_datapoints = aliyun_cms.get_rds_metric(rds_id, 'DiskUsage')['Datapoints']
# iops_datapoints = aliyun_cms.get_rds_metric(rds_id, 'IOPSUsage')['Datapoints']
# connection_datapoints = aliyun_cms.get_rds_metric(rds_id, 'ConnectionUsage')['Datapoints']
memory_datapoints
=
aliyun_cms
.
get_rds_metric
(
rds_id
,
'MemoryUsage'
)[
'Datapoints'
]
# datapoints按时间升序排列
for
p
in
cpu_datapoints
:
cpu_usage_metric
.
add_metric
([
rds_name
],
p
[
'Average'
],
p
[
'timestamp'
]
/
1000
)
# 阿里云SDK查询字段与prometheus metric name的映射
queryname_metricname
=
{
'CpuUsage'
:
'rds_cpu_usage'
,
'DiskUsage'
:
'rds_disk_usage'
,
}
metrics_from_aliyun
=
{}
all_rds_id
=
rds_instances
.
keys
()
# get metrics from aliyun
for
label
in
queryname_metricname
.
keys
():
metrics_from_aliyun
[
label
]
=
[]
for
i
in
range
(
0
,
int
(
math
.
ceil
(
len
(
all_rds_id
)
/
10.0
))):
instance_id_list
=
all_rds_id
[
10
*
i
:
10
*
(
i
+
1
)]
try
:
res
=
aliyun_cms
.
get_rds_metric
(
instance_id_list
,
label
)
metrics_from_aliyun
[
label
]
.
extend
(
res
[
'Datapoints'
])
except
Exception
as
e
:
print
(
e
)
# transform to prometheus metric format
for
queryname
,
datapoints
in
metrics_from_aliyun
.
items
():
metricname
=
queryname_metricname
[
queryname
]
metric_family
=
GaugeMetricFamily
(
metricname
,
metricname
,
labels
=
[
'rds_name'
])
for
point
in
datapoints
:
rds_name
=
rds_instances
[
point
[
'instanceId'
]]
metric_family
.
add_metric
([
rds_name
],
point
[
'Average'
],
point
[
'timestamp'
]
/
1000
)
# for p in disk_datapoints:
# disk_usage_metric.add_metric([rds_name], p['Average'], p['timestamp']/1000)
# for p in iops_datapoints:
# iops_usage_metric.add_metric([rds_name], p['Average'], p['timestamp']/1000)
# for p in connection_datapoints:
# connection_usage_metric.add_metric([rds_name], p['Average'], p['timestamp']/1000)
for
p
in
memory_datapoints
:
memory_usage_metric
.
add_metric
([
rds_name
],
p
[
'Average'
],
p
[
'timestamp'
]
/
1000
)
# # 一次抓的多个时间升序排列的metric可同时被prometheus收集到
# current_time = time.time()
# for i in range(16, 0, -1):
# cpu_usage_metric.add_metric([rds_name], random.random(), current_time-i)
# disk_usage_metric.add_metric([rds_name], random.random(), current_time-i)
metrics
.
extend
([
cpu_usage_metric
,
disk_usage_metric
,
iops_usage_metric
,
connection_usage_metric
,
memory_usage_metric
,
])
metrics
.
append
(
metric_family
)
for
m
in
metrics
:
yield
m
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment