API
You can send a GEQL query and get the result for it as JSON, like so:
http://localhost:8080/api/unit=Req/s aggro1 sum by device group by type
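Note that the query contains spaces and slashes, so most HTTP clients will need to percent-encode it (browsers do this transparently). A minimal Python 3 sketch; the host, port and query are taken from the example above, and the exact encoding accepted by the server is an assumption:

import json
import urllib.parse
import urllib.request

# Build the API URL; percent-encode the GEQL query (spaces become %20,
# '/' is left untouched so "Req/s" matches the example URL above).
query = "unit=Req/s aggro1 sum by device group by type"
url = "http://localhost:8080/api/" + urllib.parse.quote(query)

# Fetch and parse the JSON response documented below.
with urllib.request.urlopen(url) as resp:
    result = json.loads(resp.read().decode("utf-8"))

print(result["len_targets_matching"])  # e.g. 36 for the example response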
The response will be something like this. (Note that tags are shown as a string key and string value, but for tags being aggregated by, the value is a list of strings, each string being one value from a metric being aggregated.)
{
"len_targets_matching": 36, # total amount of returned metrics, graphed targets can be less if they aggregate metrics
"errors": {},
"tags": [ # all tags seen amongst all targets in all graphs
"unit",
(...)
],
"len_graphs_all": 0,
"len_graphs_targets_matching": 2,
"len_graphs_matching": 0,
"graphs": [
[
"<generated graph identifier>",
{
"normal_targets": [],
"from": "-24hours",
"targets_avg_candidates": {},
"promoted_constants": { # tags that happened to be the same for all targets on this graph.
"device": [
"sum (9 vals, 9 uniqs)",
[
"dm-0",
"dm-1",
"dm-2",
"dm-3",
"xvda",
"xvda1",
"xvda2",
"xvdb",
"xvdb1"
]
],
"collectd_plugin": "disk",
"plugin": "collectd",
"server": "aggro1"
},
"targets": [
{
"id": [ # list if aggregate, string otherwise
"collectd.aggro1.disk.dm-0.disk_ops.write",
"collectd.aggro1.disk.dm-1.disk_ops.write",
"collectd.aggro1.disk.dm-2.disk_ops.write",
"collectd.aggro1.disk.dm-3.disk_ops.write",
"collectd.aggro1.disk.xvda.disk_ops.write",
"collectd.aggro1.disk.xvda1.disk_ops.write",
"collectd.aggro1.disk.xvda2.disk_ops.write",
"collectd.aggro1.disk.xvdb.disk_ops.write",
"collectd.aggro1.disk.xvdb1.disk_ops.write"
],
"match_buckets": {},
# the actual expression to be requested from graphite
"target": "sumSeries(collectd.aggro1.disk.dm-0.disk_ops.write,collectd.aggro1.disk.dm-1.disk_ops.write,collectd.aggro1.disk.dm-2.disk_ops.write,collectd.aggro1.disk.dm-3.disk_ops.write,collectd.aggro1.disk.xvda.disk_ops.write,collectd.aggro1.disk.xvda1.disk_ops.write,collectd.aggro1.disk.xvda2.disk_ops.write,collectd.aggro1.disk.xvdb.disk_ops.write,collectd.aggro1.disk.xvdb1.disk_ops.write)",
"variables": { # tags that have different values across targets on the same graph
"operation": "write"
},
"tags": { # all tags
"plugin": "collectd",
"collectd_plugin": "disk",
"target_type": "rate",
"server": "aggro1",
"device": [
"sum (9 vals, 9 uniqs)",
[
"dm-0",
"dm-1",
"dm-2",
"dm-3",
"xvda",
"xvda1",
"xvda2",
"xvdb",
"xvdb1"
]
],
"operation": "write",
"type": "executed",
"unit": "Req/s"
}
}
],
"targets_sum_candidates": {
"<aggregation key>": [
{
"variables": {
"operation": "write"
},
"match_buckets": {
"device": ""
},
"target": "collectd.aggro1.disk.dm-0.disk_ops.write",
"id": "collectd.aggro1.disk.dm-0.disk_ops.write",
"tags": {
"plugin": "collectd",
"collectd_plugin": "disk",
"target_type": "rate",
"server": "aggro1",
"device": [
"sum (9 vals, 9 uniqs)",
[
"dm-0",
"dm-1",
"dm-2",
"dm-3",
"xvda",
"xvda1",
"xvda2",
"xvdb",
"xvdb1"
]
],
"operation": "write",
"type": "executed",
"unit": "Req/s"
}
},
{
"variables": {
"device": "dm-1",
"operation": "write",
"collectd_plugin": "disk",
"server": "aggro1",
"plugin": "collectd"
},
"match_buckets": {
"device": ""
},
"target": "collectd.aggro1.disk.dm-1.disk_ops.write",
"id": "collectd.aggro1.disk.dm-1.disk_ops.write",
"tags": {
"plugin": "collectd",
"collectd_plugin": "disk",
"target_type": "rate",
"server": "aggro1",
"device": "dm-1",
"operation": "write",
"type": "executed",
"unit": "Req/s"
}
},
{
"variables": {
<tags>
},
"match_buckets": {
"device": ""
},
"target": "collectd.aggro1.disk.dm-2.disk_ops.write",
"id": "collectd.aggro1.disk.dm-2.disk_ops.write",
"tags": {
<tags>
}
},
{
"variables": {
<tags>
},
"match_buckets": {
"device": ""
},
"target": "collectd.aggro1.disk.dm-3.disk_ops.write",
"id": "collectd.aggro1.disk.dm-3.disk_ops.write",
"tags": {
<tags>
}
},
{
"variables": {
<tags>
},
"match_buckets": {
"device": ""
},
"target": "collectd.aggro1.disk.xvda.disk_ops.write",
"id": "collectd.aggro1.disk.xvda.disk_ops.write",
"tags": {
<tags>
}
},
{
"variables": {
<tags>
},
"match_buckets": {
"device": ""
},
"target": "collectd.aggro1.disk.xvda1.disk_ops.write",
"id": "collectd.aggro1.disk.xvda1.disk_ops.write",
"tags": {
<tags>
}
},
{
"variables": {
<tags>
},
"match_buckets": {
"device": ""
},
"target": "collectd.aggro1.disk.xvda2.disk_ops.write",
"id": "collectd.aggro1.disk.xvda2.disk_ops.write",
"tags": {
<tags>
}
},
{
"variables": {
<tags>
},
"match_buckets": {
"device": ""
},
"target": "collectd.aggro1.disk.xvdb.disk_ops.write",
"id": "collectd.aggro1.disk.xvdb.disk_ops.write",
"tags": {
<tags>
}
},
{
"variables": {
<tags>
},
"match_buckets": {
"device": ""
},
"target": "collectd.aggro1.disk.xvdb1.disk_ops.write",
"id": "collectd.aggro1.disk.xvdb1.disk_ops.write",
"tags": {
<tags>
}
}
],
(...)
},
"until": "now",
"constants": {
"type": "merged",
"target_type": "rate",
"unit": "Req/s"
}
}
]
],
"len_graphs_matching_all": 2,
"targets_list": {},
"query": { # the query that was executed
"from": "-24hours",
"to": "now",
"min": null,
"max": null,
"avg_by": {},
"limit_targets": 500,
"avg_over": null,
"ast": [
"match_and",
[
"match_tag_exists",
"target_type"
],
[
"match_tag_exists",
"unit"
],
[
"match_or",
[
"match_tag_equality",
"unit",
"TReq/d"
],
[
"match_tag_equality",
"unit",
"PiReq/mo"
],
(...)
],
[
"match_id_regex",
"aggro1"
]
],
"patterns": [
"target_type=",
"unit=",
"unit=Req/s",
"aggro1"
],
"group_by": {
"target_type": [
""
],
"type": [
""
],
"unit": [
""
]
},
"sum_by": {
"device": [
""
]
},
"statement": "graph",
"limit_es": 10000
},
"len_targets_all": 364973
}
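Once parsed, the response is straightforward to walk: "graphs" is a list of [identifier, graph] pairs, and each graph carries its own "targets" list with the expression that will be sent to graphite. A minimal sketch, assuming result holds the parsed JSON from the request example above:

# Print each graph's identifier and time range, plus the graphite
# expression behind every target on it.
for graph_id, graph in result["graphs"]:
    print(graph_id, graph["from"], graph["until"])
    for target in graph["targets"]:
        # "id" is a list for aggregated targets, a plain string otherwise
        num_series = len(target["id"]) if isinstance(target["id"], list) else 1
        print("  %d series -> %s" % (num_series, target["target"]))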