Removing mesos_tasks metrics. #1686

Merged
merged 3 commits on Sep 28, 2016
49 changes: 2 additions & 47 deletions plugins/inputs/mesos/README.md
@@ -35,13 +35,10 @@ For more information, please check the [Mesos Observability Metrics](http://meso
# "tasks",
# "messages",
# ]
## Include mesos tasks statistics, default is false
# slave_tasks = true
```

By default this plugin is not configured to gather metrics from mesos. Since a mesos cluster can be deployed in numerous ways it does not provide any default
values. User needs to specify master/slave nodes this plugin will gather metrics from. Additionally, enabling `slave_tasks` will allow
gathering metrics from tasks running on specified slaves (this option is disabled by default).
values. User needs to specify master/slave nodes this plugin will gather metrics from.

### Measurements & Fields:

@@ -235,31 +232,6 @@ Mesos slave metric groups
- slave/valid_framework_messages
- slave/valid_status_updates

Mesos tasks metric groups

- executor_id
- executor_name
- framework_id
- source
- statistics
- cpus_limit
- cpus_system_time_secs
- cpus_user_time_secs
- mem_anon_bytes
- mem_cache_bytes
- mem_critical_pressure_counter
- mem_file_bytes
- mem_limit_bytes
- mem_low_pressure_counter
- mem_mapped_file_bytes
- mem_medium_pressure_counter
- mem_rss_bytes
- mem_swap_bytes
- mem_total_bytes
- mem_total_memsw_bytes
- mem_unevictable_bytes
- timestamp

### Tags:

- All master/slave measurements have the following tags:
@@ -269,16 +241,11 @@ Mesos tasks metric groups
- All master measurements have the extra tags:
- state (leader/follower)

- Tasks measurements have the following tags:
- server
- framework_id
- task_id

### Example Output:
```
$ telegraf -config ~/mesos.conf -input-filter mesos -test
* Plugin: mesos, Collection 1
mesos,role=master,state=leader,host=172.17.8.102,server=172.17.8.101
allocator/event_queue_dispatches=0,master/cpus_percent=0,
master/cpus_revocable_percent=0,master/cpus_revocable_total=0,
master/cpus_revocable_used=0,master/cpus_total=2,
@@ -297,15 +264,3 @@ master/mem_used=0,master/messages_authenticate=0,
master/messages_deactivate_framework=0 ...
```

Mesos tasks metrics (if enabled):
```
mesos-tasks,host=172.17.8.102,server=172.17.8.101,framework_id=e3060235-c4ed-4765-9d36-784e3beca07f-0000,task_id=hello-world.e4b5b497-2ccd-11e6-a659-0242fb222ce2
cpus_limit=0.2,cpus_system_time_secs=142.49,cpus_user_time_secs=388.14,
mem_anon_bytes=359129088,mem_cache_bytes=3964928,
mem_critical_pressure_counter=0,mem_file_bytes=3964928,
mem_limit_bytes=767557632,mem_low_pressure_counter=0,
mem_mapped_file_bytes=114688,mem_medium_pressure_counter=0,
mem_rss_bytes=359129088,mem_swap_bytes=0,mem_total_bytes=363094016,
mem_total_memsw_bytes=363094016,mem_unevictable_bytes=0,
timestamp=1465486052.70525 1465486053052811792...
```
25 changes: 12 additions & 13 deletions plugins/inputs/mesos/mesos.go
@@ -30,7 +30,7 @@ type Mesos struct {
MasterCols []string `toml:"master_collections"`
Slaves []string
SlaveCols []string `toml:"slave_collections"`
SlaveTasks bool
//SlaveTasks bool
}

var allMetrics = map[Role][]string{
@@ -66,8 +66,6 @@ var sampleConfig = `
# "tasks",
# "messages",
# ]
## Include mesos tasks statistics, default is false
# slave_tasks = true
`

// SampleConfig returns a sample configuration block
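Note: the `master_collections`/`slave_collections` lists in the sample config act as a whitelist over the metric groups the plugin knows about. As a rough, self-contained sketch of that whitelist idea (generic code with illustrative group names, not the plugin's actual filtering functions):

```go
package main

import "fmt"

// Role mirrors the plugin's master/slave distinction.
type Role string

// allMetrics maps a role to the metric groups it can expose; the group
// names listed here are illustrative, not the exact map from mesos.go.
var allMetrics = map[Role][]string{
	"master": {"resources", "master", "system", "frameworks", "tasks", "messages"},
	"slave":  {"resources", "agent", "system", "executors", "tasks", "messages"},
}

// selectedGroups keeps only the groups the user listed in *_collections;
// an empty whitelist means "collect everything".
func selectedGroups(role Role, whitelist []string) []string {
	if len(whitelist) == 0 {
		return allMetrics[role]
	}
	allowed := make(map[string]bool, len(whitelist))
	for _, g := range whitelist {
		allowed[g] = true
	}
	var out []string
	for _, g := range allMetrics[role] {
		if allowed[g] {
			out = append(out, g)
		}
	}
	return out
}

func main() {
	// e.g. slave_collections = ["resources", "system"]
	fmt.Println(selectedGroups("slave", []string{"resources", "system"}))
}
```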
@@ -121,16 +119,16 @@ func (m *Mesos) Gather(acc telegraf.Accumulator) error {
return
}(v)

if !m.SlaveTasks {
continue
}
// if !m.SlaveTasks {
// continue
// }

wg.Add(1)
go func(c string) {
errorChannel <- m.gatherSlaveTaskMetrics(c, ":5051", acc)
wg.Done()
return
}(v)
// wg.Add(1)
// go func(c string) {
// errorChannel <- m.gatherSlaveTaskMetrics(c, ":5051", acc)
// wg.Done()
// return
// }(v)
}

wg.Wait()
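With the task branch commented out, Gather still fans out one goroutine per configured slave and funnels errors through a channel. Below is a minimal, runnable sketch of that pattern; the endpoint path, timeout, host list, and the gatherSlaveMetrics body are illustrative stand-ins, not the plugin's exact code.

```go
package main

import (
	"fmt"
	"net/http"
	"sync"
	"time"
)

// gatherSlaveMetrics stands in for the plugin's per-node collection call;
// the real code decodes the JSON response into a telegraf accumulator.
func gatherSlaveMetrics(addr, defaultPort string) error {
	client := http.Client{Timeout: 10 * time.Second}
	resp, err := client.Get("http://" + addr + defaultPort + "/metrics/snapshot")
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	return nil
}

func main() {
	slaves := []string{"172.17.8.101", "172.17.8.102"} // illustrative hosts

	var wg sync.WaitGroup
	errorChannel := make(chan error, len(slaves))

	// One goroutine per slave, mirroring the loop in Gather above.
	for _, v := range slaves {
		wg.Add(1)
		go func(c string) {
			defer wg.Done()
			errorChannel <- gatherSlaveMetrics(c, ":5051")
		}(v)
	}

	wg.Wait()
	close(errorChannel)

	// Report any collection errors once all goroutines have finished.
	for err := range errorChannel {
		if err != nil {
			fmt.Println("error gathering slave metrics:", err)
		}
	}
}
```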
@@ -459,7 +457,6 @@ func (m *Mesos) gatherSlaveTaskMetrics(address string, defaultPort string, acc t
}

for _, task := range metrics {
tags["task_id"] = task.ExecutorID
tags["framework_id"] = task.FrameworkID

jf := jsonparser.JSONFlattener{}
@@ -468,7 +465,9 @@ func (m *Mesos) gatherSlaveTaskMetrics(address string, defaultPort string, acc t
if err != nil {
return err
}

timestamp := time.Unix(int64(jf.Fields["timestamp"].(float64)), 0)
jf.Fields["executor_id"] = task.ExecutorID

acc.AddFields("mesos_tasks", jf.Fields, tags, timestamp)
}
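The last hunk above drops `task_id` as a tag and instead records `executor_id` as a field, taking the point's time from the task's own `timestamp` statistic. A small, self-contained sketch of that flatten-and-timestamp step; the trivial flatten helper and the sample values are stand-ins for telegraf's JSONFlattener and a real /monitor/statistics payload.

```go
package main

import (
	"fmt"
	"time"
)

// flatten is a simplified stand-in for telegraf's JSONFlattener: here the
// statistics map is already flat, so it just copies the leaves into fields.
func flatten(stats map[string]interface{}) map[string]interface{} {
	fields := make(map[string]interface{}, len(stats))
	for k, v := range stats {
		fields[k] = v
	}
	return fields
}

func main() {
	// Shape mirrors the /monitor/statistics payload used in the tests;
	// the values are illustrative.
	task := map[string]interface{}{
		"executor_id":  "hello-world.e4b5b497",
		"framework_id": "e3060235-c4ed",
		"statistics": map[string]interface{}{
			"cpus_limit":    0.2,
			"mem_rss_bytes": 359129088.0,
			"timestamp":     1465486052.70525,
		},
	}

	fields := flatten(task["statistics"].(map[string]interface{}))

	// As in the diff: the point's time comes from the task's own timestamp
	// (truncated to whole seconds), and executor_id becomes a field, not a tag.
	ts := time.Unix(int64(fields["timestamp"].(float64)), 0)
	fields["executor_id"] = task["executor_id"]

	tags := map[string]string{
		"server":       "172.17.8.101", // illustrative
		"framework_id": task["framework_id"].(string),
	}

	fmt.Println("mesos_tasks", fields, tags, ts)
}
```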
94 changes: 47 additions & 47 deletions plugins/inputs/mesos/mesos_test.go
@@ -9,14 +9,14 @@ import (
"os"
"testing"

jsonparser "github.com/influxdata/telegraf/plugins/parsers/json"
"github.com/influxdata/telegraf/testutil"
)

var masterMetrics map[string]interface{}
var masterTestServer *httptest.Server
var slaveMetrics map[string]interface{}
var slaveTaskMetrics map[string]interface{}

// var slaveTaskMetrics map[string]interface{}
var slaveTestServer *httptest.Server

func randUUID() string {
@@ -216,31 +216,31 @@ func generateMetrics() {
slaveMetrics[k] = rand.Float64()
}

slaveTaskMetrics = map[string]interface{}{
"executor_id": fmt.Sprintf("task_%s", randUUID()),
"executor_name": "Some task description",
"framework_id": randUUID(),
"source": fmt.Sprintf("task_source_%s", randUUID()),
"statistics": map[string]interface{}{
"cpus_limit": rand.Float64(),
"cpus_system_time_secs": rand.Float64(),
"cpus_user_time_secs": rand.Float64(),
"mem_anon_bytes": float64(rand.Int63()),
"mem_cache_bytes": float64(rand.Int63()),
"mem_critical_pressure_counter": float64(rand.Int63()),
"mem_file_bytes": float64(rand.Int63()),
"mem_limit_bytes": float64(rand.Int63()),
"mem_low_pressure_counter": float64(rand.Int63()),
"mem_mapped_file_bytes": float64(rand.Int63()),
"mem_medium_pressure_counter": float64(rand.Int63()),
"mem_rss_bytes": float64(rand.Int63()),
"mem_swap_bytes": float64(rand.Int63()),
"mem_total_bytes": float64(rand.Int63()),
"mem_total_memsw_bytes": float64(rand.Int63()),
"mem_unevictable_bytes": float64(rand.Int63()),
"timestamp": rand.Float64(),
},
}
// slaveTaskMetrics = map[string]interface{}{
// "executor_id": fmt.Sprintf("task_name.%s", randUUID()),
// "executor_name": "Some task description",
// "framework_id": randUUID(),
// "source": fmt.Sprintf("task_source.%s", randUUID()),
// "statistics": map[string]interface{}{
// "cpus_limit": rand.Float64(),
// "cpus_system_time_secs": rand.Float64(),
// "cpus_user_time_secs": rand.Float64(),
// "mem_anon_bytes": float64(rand.Int63()),
// "mem_cache_bytes": float64(rand.Int63()),
// "mem_critical_pressure_counter": float64(rand.Int63()),
// "mem_file_bytes": float64(rand.Int63()),
// "mem_limit_bytes": float64(rand.Int63()),
// "mem_low_pressure_counter": float64(rand.Int63()),
// "mem_mapped_file_bytes": float64(rand.Int63()),
// "mem_medium_pressure_counter": float64(rand.Int63()),
// "mem_rss_bytes": float64(rand.Int63()),
// "mem_swap_bytes": float64(rand.Int63()),
// "mem_total_bytes": float64(rand.Int63()),
// "mem_total_memsw_bytes": float64(rand.Int63()),
// "mem_unevictable_bytes": float64(rand.Int63()),
// "timestamp": rand.Float64(),
// },
// }
}

func TestMain(m *testing.M) {
@@ -260,11 +260,11 @@ func TestMain(m *testing.M) {
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(slaveMetrics)
})
slaveRouter.HandleFunc("/monitor/statistics", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode([]map[string]interface{}{slaveTaskMetrics})
})
// slaveRouter.HandleFunc("/monitor/statistics", func(w http.ResponseWriter, r *http.Request) {
// w.WriteHeader(http.StatusOK)
// w.Header().Set("Content-Type", "application/json")
// json.NewEncoder(w).Encode([]map[string]interface{}{slaveTaskMetrics})
// })
slaveTestServer = httptest.NewServer(slaveRouter)

rc := m.Run()
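For readers less familiar with the fixture above: the tests stand up an in-process HTTP server that returns canned JSON, then point the plugin at its listener address. A generic, runnable sketch of that pattern (the path and payload here are illustrative, not the test file's exact code):

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	// Canned payload standing in for the randomly generated slaveMetrics map.
	metrics := map[string]interface{}{"slave/cpus_total": 2.0}

	mux := http.NewServeMux()
	mux.HandleFunc("/metrics/snapshot", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(metrics)
	})

	// httptest.NewServer binds to a free local port; the real tests hand
	// server.Listener.Addr().String() to the plugin's Slaves list.
	server := httptest.NewServer(mux)
	defer server.Close()

	resp, err := http.Get(server.URL + "/metrics/snapshot")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```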
@@ -324,10 +324,10 @@ func TestMesosSlave(t *testing.T) {
var acc testutil.Accumulator

m := Mesos{
Masters: []string{},
Slaves: []string{slaveTestServer.Listener.Addr().String()},
SlaveTasks: true,
Timeout: 10,
Masters: []string{},
Slaves: []string{slaveTestServer.Listener.Addr().String()},
// SlaveTasks: true,
Timeout: 10,
}

err := m.Gather(&acc)
@@ -338,17 +338,17 @@

acc.AssertContainsFields(t, "mesos", slaveMetrics)

jf := jsonparser.JSONFlattener{}
err = jf.FlattenJSON("", slaveTaskMetrics)

if err != nil {
t.Errorf(err.Error())
}

acc.AssertContainsFields(
t,
"mesos_tasks",
slaveTaskMetrics["statistics"].(map[string]interface{}))
// expectedFields := make(map[string]interface{}, len(slaveTaskMetrics["statistics"].(map[string]interface{}))+1)
// for k, v := range slaveTaskMetrics["statistics"].(map[string]interface{}) {
// expectedFields[k] = v
// }
// expectedFields["executor_id"] = slaveTaskMetrics["executor_id"]

// acc.AssertContainsTaggedFields(
// t,
// "mesos_tasks",
// expectedFields,
// map[string]string{"server": "127.0.0.1", "framework_id": slaveTaskMetrics["framework_id"].(string)})
}

func TestSlaveFilter(t *testing.T) {