From 34e17f72823bc69ed9af8ed125ef3dca925244c6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Torkel=20=C3=96degaard?= Date: Mon, 6 Jun 2016 17:11:46 +0200 Subject: [PATCH] feat(alerting): requests looks to be working again --- pkg/api/alerting.go | 4 +- pkg/api/api.go | 2 - pkg/api/dataproxy.go | 2 +- pkg/api/datasources.go | 6 +- pkg/api/dtos/alerting.go | 2 +- pkg/models/alerts.go | 4 +- pkg/models/datasource.go | 4 +- pkg/services/alerting/alerting.go | 9 +- pkg/services/alerting/dashboard_parser.go | 4 +- pkg/services/alerting/engine.go | 31 +-- pkg/services/alerting/executor.go | 74 ++++++-- pkg/services/alerting/executor_test.go | 16 +- pkg/services/alerting/models.go | 10 +- pkg/services/alerting/rule_reader.go | 2 +- pkg/services/alerting/scheduler.go | 2 +- .../sqlstore/alert_rule_changes_test.go | 2 +- pkg/services/sqlstore/alert_rule_test.go | 6 +- pkg/services/sqlstore/alert_state_test.go | 2 +- .../sqlstore/dashboard_parser_test.go | 4 +- pkg/services/sqlstore/datasource.go | 12 +- pkg/services/sqlstore/migrations/alert_mig.go | 4 +- pkg/tsdb/batch.go | 2 +- pkg/tsdb/executor.go | 6 +- pkg/tsdb/fake_test.go | 39 ++++ pkg/tsdb/graphite/graphite.go | 81 ++++++++ pkg/tsdb/graphite/graphite_test.go | 31 +++ pkg/tsdb/graphite/types.go | 6 + pkg/tsdb/models.go | 8 +- pkg/tsdb/request.go | 4 + pkg/tsdb/tsdb_test.go | 177 ++++++++++++++++++ .../alerting/partials/alert_list.html | 2 +- .../app/plugins/panel/graph/alert_tab_ctrl.ts | 4 +- .../panel/graph/partials/tab_alerting.html | 2 +- 33 files changed, 471 insertions(+), 93 deletions(-) create mode 100644 pkg/tsdb/fake_test.go create mode 100644 pkg/tsdb/graphite/graphite.go create mode 100644 pkg/tsdb/graphite/graphite_test.go create mode 100644 pkg/tsdb/graphite/types.go create mode 100644 pkg/tsdb/tsdb_test.go diff --git a/pkg/api/alerting.go b/pkg/api/alerting.go index 721d2fb8c1b..297a4b5fe59 100644 --- a/pkg/api/alerting.go +++ b/pkg/api/alerting.go @@ -22,7 +22,7 @@ func ValidateOrgAlert(c *middleware.Context) { } } -// GET /api/alerts/changes +// GET /api/alerting/changes func GetAlertChanges(c *middleware.Context) Response { query := models.GetAlertChangesQuery{ OrgId: c.OrgId, @@ -69,7 +69,7 @@ func GetAlerts(c *middleware.Context) Response { WarnLevel: alert.WarnLevel, CritLevel: alert.CritLevel, Frequency: alert.Frequency, - Title: alert.Title, + Name: alert.Name, Description: alert.Description, QueryRange: alert.QueryRange, Aggregator: alert.Aggregator, diff --git a/pkg/api/api.go b/pkg/api/api.go index f308afc0837..4c23647082d 100644 --- a/pkg/api/api.go +++ b/pkg/api/api.go @@ -244,9 +244,7 @@ func Register(r *macaron.Macaron) { r.Group("/alerts", func() { r.Group("/rules", func() { r.Get("/:alertId/states", wrap(GetAlertStates)) - r.Put("/:alertId/state", bind(m.UpdateAlertStateCommand{}), wrap(PutAlertState)) - r.Get("/:alertId", ValidateOrgAlert, wrap(GetAlert)) //r.Delete("/:alertId", ValidateOrgAlert, wrap(DelAlert)) disabled until we know how to handle it dashboard updates r.Get("/", wrap(GetAlerts)) diff --git a/pkg/api/dataproxy.go b/pkg/api/dataproxy.go index 871212adc6f..8c2e134f3af 100644 --- a/pkg/api/dataproxy.go +++ b/pkg/api/dataproxy.go @@ -77,7 +77,7 @@ func getDatasource(id int64, orgId int64) (*m.DataSource, error) { return nil, err } - return &query.Result, nil + return query.Result, nil } func ProxyDataSourceRequest(c *middleware.Context) { diff --git a/pkg/api/datasources.go b/pkg/api/datasources.go index 63c2ec57b7a..62a83bdaa0b 100644 --- a/pkg/api/datasources.go +++ b/pkg/api/datasources.go @@ -123,9 
+123,7 @@ func GetDataSourceByName(c *middleware.Context) Response { return ApiError(500, "Failed to query datasources", err) } - ds := query.Result - dtos := convertModelToDtos(ds) - + dtos := convertModelToDtos(query.Result) return Json(200, &dtos) } @@ -148,7 +146,7 @@ func GetDataSourceIdByName(c *middleware.Context) Response { return Json(200, &dtos) } -func convertModelToDtos(ds m.DataSource) dtos.DataSource { +func convertModelToDtos(ds *m.DataSource) dtos.DataSource { return dtos.DataSource{ Id: ds.Id, OrgId: ds.OrgId, diff --git a/pkg/api/dtos/alerting.go b/pkg/api/dtos/alerting.go index 5fc2dbc371b..2db3878f7e9 100644 --- a/pkg/api/dtos/alerting.go +++ b/pkg/api/dtos/alerting.go @@ -11,7 +11,7 @@ type AlertRuleDTO struct { WarnOperator string `json:"warnOperator"` CritOperator string `json:"critOperator"` Frequency int64 `json:"frequency"` - Title string `json:"title"` + Name string `json:"name"` Description string `json:"description"` QueryRange int `json:"queryRange"` Aggregator string `json:"aggregator"` diff --git a/pkg/models/alerts.go b/pkg/models/alerts.go index 17a60a8f029..b98eef598f4 100644 --- a/pkg/models/alerts.go +++ b/pkg/models/alerts.go @@ -17,7 +17,7 @@ type AlertRule struct { WarnOperator string `json:"warnOperator"` CritOperator string `json:"critOperator"` Frequency int64 `json:"frequency"` - Title string `json:"title"` + Name string `json:"name"` Description string `json:"description"` QueryRange int `json:"queryRange"` Aggregator string `json:"aggregator"` @@ -38,7 +38,7 @@ func (this *AlertRule) Equals(other *AlertRule) bool { result = result || this.Query != other.Query result = result || this.QueryRefId != other.QueryRefId result = result || this.Frequency != other.Frequency - result = result || this.Title != other.Title + result = result || this.Name != other.Name result = result || this.Description != other.Description result = result || this.QueryRange != other.QueryRange //don't compare .State! That would be insane. 
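As an aside for readers following the title-to-name rename through the stack, the sketch below is not part of the patch; it shows how a panel's alerting JSON ends up on the renamed AlertRule.Name field, mirroring the simplejson lookups in the dashboard_parser.go hunk further down. The sample values echo the dashboard_parser_test.go fixture; everything else here is illustrative.

```go
package main

import (
	"fmt"

	"github.com/grafana/grafana/pkg/components/simplejson"
	m "github.com/grafana/grafana/pkg/models"
)

func main() {
	// Example panel JSON, trimmed to the alerting block the parser reads.
	panelJson, _ := simplejson.NewJson([]byte(`{
		"alerting": {
			"name":        "active desktop users",
			"description": "restart webservers",
			"frequency":   10,
			"queryRange":  600,
			"aggregator":  "sum"
		}
	}`))

	alerting := panelJson.Get("alerting")
	alert := &m.AlertRule{
		Name:        alerting.Get("name").MustString(), // was alerting.Get("title")
		Description: alerting.Get("description").MustString(),
		Frequency:   alerting.Get("frequency").MustInt64(),
		QueryRange:  alerting.Get("queryRange").MustInt(),
		Aggregator:  alerting.Get("aggregator").MustString(),
	}

	fmt.Printf("%s runs every %ds\n", alert.Name, alert.Frequency)
}
```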
diff --git a/pkg/models/datasource.go b/pkg/models/datasource.go index 2e9d98e9700..794266ba71e 100644 --- a/pkg/models/datasource.go +++ b/pkg/models/datasource.go @@ -131,13 +131,13 @@ type GetDataSourcesQuery struct { type GetDataSourceByIdQuery struct { Id int64 OrgId int64 - Result DataSource + Result *DataSource } type GetDataSourceByNameQuery struct { Name string OrgId int64 - Result DataSource + Result *DataSource } // --------------------- diff --git a/pkg/services/alerting/alerting.go b/pkg/services/alerting/alerting.go index 31c196b3f94..2aacb7d49ad 100644 --- a/pkg/services/alerting/alerting.go +++ b/pkg/services/alerting/alerting.go @@ -5,6 +5,7 @@ import ( "github.com/grafana/grafana/pkg/log" m "github.com/grafana/grafana/pkg/models" "github.com/grafana/grafana/pkg/setting" + _ "github.com/grafana/grafana/pkg/tsdb/graphite" ) var ( @@ -31,11 +32,11 @@ func Init() { // go scheduler.handleResponses() } -func saveState(response *AlertResult) { +func saveState(result *AlertResult) { cmd := &m.UpdateAlertStateCommand{ - AlertId: response.Id, - NewState: response.State, - Info: response.Description, + AlertId: result.AlertJob.Rule.Id, + NewState: result.State, + Info: result.Description, } if err := bus.Dispatch(cmd); err != nil { diff --git a/pkg/services/alerting/dashboard_parser.go b/pkg/services/alerting/dashboard_parser.go index 88f6e7f8b0d..73b9063fa42 100644 --- a/pkg/services/alerting/dashboard_parser.go +++ b/pkg/services/alerting/dashboard_parser.go @@ -3,6 +3,7 @@ package alerting import ( "github.com/grafana/grafana/pkg/bus" "github.com/grafana/grafana/pkg/components/simplejson" + "github.com/grafana/grafana/pkg/log" m "github.com/grafana/grafana/pkg/models" ) @@ -27,12 +28,13 @@ func ParseAlertsFromDashboard(cmd *m.SaveDashboardCommand) []*m.AlertRule { WarnOperator: alerting.Get("warnOperator").MustString(), CritOperator: alerting.Get("critOperator").MustString(), Frequency: alerting.Get("frequency").MustInt64(), - Title: alerting.Get("title").MustString(), + Name: alerting.Get("name").MustString(), Description: alerting.Get("description").MustString(), QueryRange: alerting.Get("queryRange").MustInt(), Aggregator: alerting.Get("aggregator").MustString(), } + log.Info("Alertrule: %v", alert.Name) for _, targetsObj := range panel.Get("targets").MustArray() { target := simplejson.NewFromAny(targetsObj) diff --git a/pkg/services/alerting/engine.go b/pkg/services/alerting/engine.go index 4d85d375ab5..380e67a75cc 100644 --- a/pkg/services/alerting/engine.go +++ b/pkg/services/alerting/engine.go @@ -33,7 +33,7 @@ func NewEngine() *Engine { } func (e *Engine) Start() { - log.Info("Alerting: Engine.Start()") + log.Info("Alerting: engine.Start()") go e.alertingTicker() go e.execDispatch() @@ -51,13 +51,12 @@ func (e *Engine) alertingTicker() { for { select { case tick := <-e.ticker.C: - // update rules ever tenth tick + // TEMP SOLUTION update rules ever tenth tick if tickIndex%10 == 0 { e.scheduler.Update(e.ruleReader.Fetch()) } e.scheduler.Tick(tick, e.execQueue) - tickIndex++ } } @@ -65,7 +64,7 @@ func (e *Engine) alertingTicker() { func (e *Engine) execDispatch() { for job := range e.execQueue { - log.Trace("Alerting: Engine:execDispatch() starting job %s", job.Rule.Title) + log.Trace("Alerting: engine:execDispatch() starting job %s", job.Rule.Name) job.Running = true e.executeJob(job) } @@ -80,33 +79,39 @@ func (e *Engine) executeJob(job *AlertJob) { select { case <-time.After(time.Second * 5): e.resultQueue <- &AlertResult{ - Id: job.Rule.Id, State: alertstates.Pending, 
Duration: float64(time.Since(now).Nanoseconds()) / float64(1000000), + Error: fmt.Errorf("Timeout"), AlertJob: job, } + log.Trace("Alerting: engine.executeJob(): timeout") case result := <-resultChan: result.Duration = float64(time.Since(now).Nanoseconds()) / float64(1000000) - log.Trace("Alerting: engine.executeJob(): exeuction took %vms", result.Duration) + log.Trace("Alerting: engine.executeJob(): done %vms", result.Duration) e.resultQueue <- result } } func (e *Engine) resultHandler() { for result := range e.resultQueue { - log.Debug("Alerting: engine.resultHandler(): alert(%d) status(%s) actual(%v) retry(%d)", result.Id, result.State, result.ActualValue, result.AlertJob.RetryCount) + log.Debug("Alerting: engine.resultHandler(): alert(%d) status(%s) actual(%v) retry(%d)", result.AlertJob.Rule.Id, result.State, result.ActualValue, result.AlertJob.RetryCount) + result.AlertJob.Running = false - if result.IsResultIncomplete() { + // handle result error + if result.Error != nil { result.AlertJob.RetryCount++ + if result.AlertJob.RetryCount < maxRetries { + log.Error(3, "Alerting: Rule('%s') Result Error: %v, Retrying..", result.AlertJob.Rule.Name, result.Error) + e.execQueue <- result.AlertJob } else { - saveState(&AlertResult{ - Id: result.Id, - State: alertstates.Critical, - Description: fmt.Sprintf("Failed to run check after %d retires", maxRetries), - }) + log.Error(3, "Alerting: Rule('%s') Result Error: %v, Max retries reached", result.AlertJob.Rule.Name, result.Error) + + result.State = alertstates.Critical + result.Description = fmt.Sprintf("Failed to run check after %d retires, Error: %v", maxRetries, result.Error) + saveState(result) } } else { result.AlertJob.RetryCount = 0 diff --git a/pkg/services/alerting/executor.go b/pkg/services/alerting/executor.go index 2fc2d662230..b7d43d89489 100644 --- a/pkg/services/alerting/executor.go +++ b/pkg/services/alerting/executor.go @@ -2,6 +2,7 @@ package alerting import ( "fmt" + "strconv" "math" @@ -78,38 +79,79 @@ var aggregator = map[string]aggregationFn{ } func (e *ExecutorImpl) Execute(job *AlertJob, resultQueue chan *AlertResult) { - response, err := e.GetSeries(job) - + timeSeries, err := e.executeQuery(job) if err != nil { - resultQueue <- &AlertResult{State: alertstates.Pending, Id: job.Rule.Id, AlertJob: job} + resultQueue <- &AlertResult{ + Error: err, + State: alertstates.Pending, + AlertJob: job, + } } - result := e.validateRule(job.Rule, response) + result := e.evaluateRule(job.Rule, timeSeries) result.AlertJob = job resultQueue <- result } -func (e *ExecutorImpl) GetSeries(job *AlertJob) (tsdb.TimeSeriesSlice, error) { - query := &m.GetDataSourceByIdQuery{ +func (e *ExecutorImpl) executeQuery(job *AlertJob) (tsdb.TimeSeriesSlice, error) { + getDsInfo := &m.GetDataSourceByIdQuery{ Id: job.Rule.DatasourceId, OrgId: job.Rule.OrgId, } - err := bus.Dispatch(query) - - if err != nil { + if err := bus.Dispatch(getDsInfo); err != nil { return nil, fmt.Errorf("Could not find datasource for %d", job.Rule.DatasourceId) } - // if query.Result.Type == m.DS_GRAPHITE { - // return GraphiteClient{}.GetSeries(*job, query.Result) - // } + req := e.GetRequestForAlertRule(job.Rule, getDsInfo.Result) + result := make(tsdb.TimeSeriesSlice, 0) - return nil, fmt.Errorf("Grafana does not support alerts for %s", query.Result.Type) + resp, err := tsdb.HandleRequest(req) + if err != nil { + return nil, fmt.Errorf("Alerting: GetSeries() tsdb.HandleRequest() error %v", err) + } + + for _, v := range resp.Results { + if v.Error != nil { + return nil, 
fmt.Errorf("Alerting: GetSeries() tsdb.HandleRequest() response error %v", v) + } + + result = append(result, v.Series...) + } + + return result, nil } -func (e *ExecutorImpl) validateRule(rule *AlertRule, series tsdb.TimeSeriesSlice) *AlertResult { +func (e *ExecutorImpl) GetRequestForAlertRule(rule *AlertRule, datasource *m.DataSource) *tsdb.Request { + + req := &tsdb.Request{ + TimeRange: tsdb.TimeRange{ + From: "-" + strconv.Itoa(rule.QueryRange) + "s", + To: "now", + }, + Queries: tsdb.QuerySlice{ + &tsdb.Query{ + RefId: rule.QueryRefId, + Query: rule.Query, + DataSource: &tsdb.DataSourceInfo{ + Id: datasource.Id, + Name: datasource.Name, + PluginId: datasource.Type, + Url: datasource.Url, + }, + }, + }, + } + + return req +} + +func (e *ExecutorImpl) evaluateRule(rule *AlertRule, series tsdb.TimeSeriesSlice) *AlertResult { + log.Trace("Alerting: executor.evaluateRule: %v, query result: series: %v", rule.Name, len(series)) + for _, serie := range series { + log.Info("Alerting: executor.validate: %v", serie.Name) + if aggregator[rule.Aggregator] == nil { continue } @@ -122,7 +164,6 @@ func (e *ExecutorImpl) validateRule(rule *AlertRule, series tsdb.TimeSeriesSlice if critResult { return &AlertResult{ State: alertstates.Critical, - Id: rule.Id, ActualValue: aggValue, Description: fmt.Sprintf(descriptionFmt, aggValue, serie.Name), } @@ -134,12 +175,11 @@ func (e *ExecutorImpl) validateRule(rule *AlertRule, series tsdb.TimeSeriesSlice if warnResult { return &AlertResult{ State: alertstates.Warn, - Id: rule.Id, Description: fmt.Sprintf(descriptionFmt, aggValue, serie.Name), ActualValue: aggValue, } } } - return &AlertResult{State: alertstates.Ok, Id: rule.Id, Description: "Alert is OK!"} + return &AlertResult{State: alertstates.Ok, Description: "Alert is OK!"} } diff --git a/pkg/services/alerting/executor_test.go b/pkg/services/alerting/executor_test.go index d7ac67ba631..9e3a5d64e21 100644 --- a/pkg/services/alerting/executor_test.go +++ b/pkg/services/alerting/executor_test.go @@ -20,7 +20,7 @@ func TestAlertingExecutor(t *testing.T) { tsdb.NewTimeSeries("test1", [][2]float64{{2, 0}}), } - result := executor.validateRule(rule, timeSeries) + result := executor.evaluateRule(rule, timeSeries) So(result.State, ShouldEqual, alertstates.Ok) }) @@ -31,7 +31,7 @@ func TestAlertingExecutor(t *testing.T) { tsdb.NewTimeSeries("test1", [][2]float64{{2, 0}}), } - result := executor.validateRule(rule, timeSeries) + result := executor.evaluateRule(rule, timeSeries) So(result.State, ShouldEqual, alertstates.Critical) }) @@ -42,7 +42,7 @@ func TestAlertingExecutor(t *testing.T) { tsdb.NewTimeSeries("test1", [][2]float64{{9, 0}, {9, 0}}), } - result := executor.validateRule(rule, timeSeries) + result := executor.evaluateRule(rule, timeSeries) So(result.State, ShouldEqual, alertstates.Critical) }) @@ -53,7 +53,7 @@ func TestAlertingExecutor(t *testing.T) { tsdb.NewTimeSeries("test1", [][2]float64{{9, 0}, {9, 0}}), } - result := executor.validateRule(rule, timeSeries) + result := executor.evaluateRule(rule, timeSeries) So(result.State, ShouldEqual, alertstates.Ok) }) @@ -64,7 +64,7 @@ func TestAlertingExecutor(t *testing.T) { tsdb.NewTimeSeries("test1", [][2]float64{{11, 0}, {9, 0}}), } - result := executor.validateRule(rule, timeSeries) + result := executor.evaluateRule(rule, timeSeries) So(result.State, ShouldEqual, alertstates.Ok) }) @@ -75,7 +75,7 @@ func TestAlertingExecutor(t *testing.T) { tsdb.NewTimeSeries("test1", [][2]float64{{1, 0}, {11, 0}}), } - result := executor.validateRule(rule, timeSeries) 
+ result := executor.evaluateRule(rule, timeSeries) So(result.State, ShouldEqual, alertstates.Critical) }) }) @@ -89,7 +89,7 @@ func TestAlertingExecutor(t *testing.T) { tsdb.NewTimeSeries("test1", [][2]float64{{2, 0}}), } - result := executor.validateRule(rule, timeSeries) + result := executor.evaluateRule(rule, timeSeries) So(result.State, ShouldEqual, alertstates.Ok) }) @@ -101,7 +101,7 @@ func TestAlertingExecutor(t *testing.T) { tsdb.NewTimeSeries("test1", [][2]float64{{11, 0}}), } - result := executor.validateRule(rule, timeSeries) + result := executor.evaluateRule(rule, timeSeries) So(result.State, ShouldEqual, alertstates.Critical) }) }) diff --git a/pkg/services/alerting/models.go b/pkg/services/alerting/models.go index 0a8224c0cf0..7b0fb616a1c 100644 --- a/pkg/services/alerting/models.go +++ b/pkg/services/alerting/models.go @@ -1,7 +1,5 @@ package alerting -import "github.com/grafana/grafana/pkg/services/alerting/alertstates" - type AlertJob struct { Offset int64 Delay bool @@ -11,18 +9,14 @@ type AlertJob struct { } type AlertResult struct { - Id int64 State string ActualValue float64 Duration float64 Description string + Error error AlertJob *AlertJob } -func (ar *AlertResult) IsResultIncomplete() bool { - return ar.State == alertstates.Pending -} - type AlertRule struct { Id int64 OrgId int64 @@ -36,7 +30,7 @@ type AlertRule struct { WarnOperator string CritOperator string Frequency int64 - Title string + Name string Description string QueryRange int Aggregator string diff --git a/pkg/services/alerting/rule_reader.go b/pkg/services/alerting/rule_reader.go index 734c7504b5c..9279df28cc8 100644 --- a/pkg/services/alerting/rule_reader.go +++ b/pkg/services/alerting/rule_reader.go @@ -60,7 +60,7 @@ func (arr *AlertRuleReader) Fetch() []*AlertRule { model.CritLevel = ruleDef.CritLevel model.CritOperator = ruleDef.CritOperator model.Frequency = ruleDef.Frequency - model.Title = ruleDef.Title + model.Name = ruleDef.Name model.Description = ruleDef.Description model.Aggregator = ruleDef.Aggregator model.State = ruleDef.State diff --git a/pkg/services/alerting/scheduler.go b/pkg/services/alerting/scheduler.go index ae94461fe4f..ffa2b2b900c 100644 --- a/pkg/services/alerting/scheduler.go +++ b/pkg/services/alerting/scheduler.go @@ -47,7 +47,7 @@ func (s *SchedulerImpl) Tick(tickTime time.Time, execQueue chan *AlertJob) { for _, job := range s.jobs { if now%job.Rule.Frequency == 0 && job.Running == false { - log.Trace("Scheduler: Putting job on to exec queue: %s", job.Rule.Title) + log.Trace("Scheduler: Putting job on to exec queue: %s", job.Rule.Name) execQueue <- job } } diff --git a/pkg/services/sqlstore/alert_rule_changes_test.go b/pkg/services/sqlstore/alert_rule_changes_test.go index da25d4fd23a..dff2b7853b1 100644 --- a/pkg/services/sqlstore/alert_rule_changes_test.go +++ b/pkg/services/sqlstore/alert_rule_changes_test.go @@ -31,7 +31,7 @@ func TestAlertRuleChangesDataAccess(t *testing.T) { WarnOperator: ">", CritOperator: ">", Frequency: 10, - Title: "Alerting title", + Name: "Alerting title", Description: "Alerting description", QueryRange: 3600, Aggregator: "avg", diff --git a/pkg/services/sqlstore/alert_rule_test.go b/pkg/services/sqlstore/alert_rule_test.go index 7372e7caacf..2ab839840ed 100644 --- a/pkg/services/sqlstore/alert_rule_test.go +++ b/pkg/services/sqlstore/alert_rule_test.go @@ -26,7 +26,7 @@ func TestAlertingDataAccess(t *testing.T) { WarnOperator: ">", CritOperator: ">", Frequency: 10, - Title: "Alerting title", + Name: "Alerting title", Description: "Alerting 
description", QueryRange: 3600, Aggregator: "avg", @@ -65,7 +65,7 @@ func TestAlertingDataAccess(t *testing.T) { So(alert.CritOperator, ShouldEqual, ">") So(alert.Query, ShouldEqual, "Query") So(alert.QueryRefId, ShouldEqual, "A") - So(alert.Title, ShouldEqual, "Alerting title") + So(alert.Name, ShouldEqual, "Alerting title") So(alert.Description, ShouldEqual, "Alerting description") So(alert.QueryRange, ShouldEqual, 3600) So(alert.Aggregator, ShouldEqual, "avg") @@ -189,7 +189,7 @@ func TestAlertingDataAccess(t *testing.T) { WarnOperator: ">", CritOperator: ">", Frequency: 10, - Title: "Alerting title", + Name: "Alerting title", Description: "Alerting description", QueryRange: 3600, Aggregator: "avg", diff --git a/pkg/services/sqlstore/alert_state_test.go b/pkg/services/sqlstore/alert_state_test.go index 72fa2f4c78c..2389fc43a18 100644 --- a/pkg/services/sqlstore/alert_state_test.go +++ b/pkg/services/sqlstore/alert_state_test.go @@ -25,7 +25,7 @@ func TestAlertingStateAccess(t *testing.T) { WarnOperator: ">", CritOperator: ">", Frequency: 10, - Title: "Alerting title", + Name: "Alerting title", Description: "Alerting description", QueryRange: 3600, Aggregator: "avg", diff --git a/pkg/services/sqlstore/dashboard_parser_test.go b/pkg/services/sqlstore/dashboard_parser_test.go index 5a7e3433e44..331e9f58478 100644 --- a/pkg/services/sqlstore/dashboard_parser_test.go +++ b/pkg/services/sqlstore/dashboard_parser_test.go @@ -110,7 +110,7 @@ func TestAlertModel(t *testing.T) { "aggregator": "sum", "queryRange": "10m", "frequency": 10, - "title": "active desktop users", + "name": "active desktop users", "description": "restart webservers" }, "links": [] @@ -386,7 +386,7 @@ func TestAlertModel(t *testing.T) { So(v.Query, ShouldNotBeEmpty) So(v.QueryRefId, ShouldNotBeEmpty) So(v.QueryRange, ShouldNotBeEmpty) - So(v.Title, ShouldNotBeEmpty) + So(v.Name, ShouldNotBeEmpty) So(v.Description, ShouldNotBeEmpty) } diff --git a/pkg/services/sqlstore/datasource.go b/pkg/services/sqlstore/datasource.go index 55a95413640..c028fd0fccc 100644 --- a/pkg/services/sqlstore/datasource.go +++ b/pkg/services/sqlstore/datasource.go @@ -19,22 +19,26 @@ func init() { } func GetDataSourceById(query *m.GetDataSourceByIdQuery) error { - sess := x.Limit(100, 0).Where("org_id=? AND id=?", query.OrgId, query.Id) - has, err := sess.Get(&query.Result) + datasource := m.DataSource{OrgId: query.OrgId, Id: query.Id} + has, err := x.Get(&datasource) if !has { return m.ErrDataSourceNotFound } + + query.Result = &datasource return err } func GetDataSourceByName(query *m.GetDataSourceByNameQuery) error { - sess := x.Limit(100, 0).Where("org_id=? 
AND name=?", query.OrgId, query.Name) - has, err := sess.Get(&query.Result) + datasource := m.DataSource{OrgId: query.OrgId, Name: query.Name} + has, err := x.Get(&datasource) if !has { return m.ErrDataSourceNotFound } + + query.Result = &datasource return err } diff --git a/pkg/services/sqlstore/migrations/alert_mig.go b/pkg/services/sqlstore/migrations/alert_mig.go index 023829887b3..e5b5d783886 100644 --- a/pkg/services/sqlstore/migrations/alert_mig.go +++ b/pkg/services/sqlstore/migrations/alert_mig.go @@ -21,7 +21,7 @@ func addAlertMigrations(mg *Migrator) { {Name: "crit_level", Type: DB_Float, Nullable: false}, {Name: "crit_operator", Type: DB_NVarchar, Length: 10, Nullable: false}, {Name: "frequency", Type: DB_BigInt, Nullable: false}, - {Name: "title", Type: DB_NVarchar, Length: 255, Nullable: false}, + {Name: "name", Type: DB_NVarchar, Length: 255, Nullable: false}, {Name: "description", Type: DB_NVarchar, Length: 255, Nullable: false}, {Name: "query_range", Type: DB_Int, Nullable: false}, {Name: "aggregator", Type: DB_NVarchar, Length: 255, Nullable: false}, @@ -32,7 +32,7 @@ func addAlertMigrations(mg *Migrator) { } // create table - mg.AddMigration("create alert_rule table v1", NewAddTableMigration(alertV1)) + mg.AddMigration("create alert_rule table v2", NewAddTableMigration(alertV1)) alert_changes := Table{ Name: "alert_rule_change", diff --git a/pkg/tsdb/batch.go b/pkg/tsdb/batch.go index 92aa1afd2f8..bc16ed1e75a 100644 --- a/pkg/tsdb/batch.go +++ b/pkg/tsdb/batch.go @@ -26,7 +26,7 @@ func (bg *Batch) process(context *QueryContext) { if executor == nil { bg.Done = true result := &BatchResult{ - Error: errors.New("Could not find executor for data source type " + bg.Queries[0].DataSource.Type), + Error: errors.New("Could not find executor for data source type " + bg.Queries[0].DataSource.PluginId), QueryResults: make(map[string]*QueryResult), } for _, query := range bg.Queries { diff --git a/pkg/tsdb/executor.go b/pkg/tsdb/executor.go index 7317fde23f2..b39c2cdaa97 100644 --- a/pkg/tsdb/executor.go +++ b/pkg/tsdb/executor.go @@ -13,12 +13,12 @@ func init() { } func getExecutorFor(dsInfo *DataSourceInfo) Executor { - if fn, exists := registry[dsInfo.Type]; exists { + if fn, exists := registry[dsInfo.PluginId]; exists { return fn(dsInfo) } return nil } -func RegisterExecutor(dsType string, fn GetExecutorFn) { - registry[dsType] = fn +func RegisterExecutor(pluginId string, fn GetExecutorFn) { + registry[pluginId] = fn } diff --git a/pkg/tsdb/fake_test.go b/pkg/tsdb/fake_test.go new file mode 100644 index 00000000000..2ba02792d6d --- /dev/null +++ b/pkg/tsdb/fake_test.go @@ -0,0 +1,39 @@ +package tsdb + +type FakeExecutor struct { + results map[string]*QueryResult + resultsFn map[string]ResultsFn +} + +type ResultsFn func(context *QueryContext) *QueryResult + +func NewFakeExecutor(dsInfo *DataSourceInfo) *FakeExecutor { + return &FakeExecutor{ + results: make(map[string]*QueryResult), + resultsFn: make(map[string]ResultsFn), + } +} + +func (e *FakeExecutor) Execute(queries QuerySlice, context *QueryContext) *BatchResult { + result := &BatchResult{QueryResults: make(map[string]*QueryResult)} + for _, query := range queries { + if results, has := e.results[query.RefId]; has { + result.QueryResults[query.RefId] = results + } + if testFunc, has := e.resultsFn[query.RefId]; has { + result.QueryResults[query.RefId] = testFunc(context) + } + } + + return result +} + +func (e *FakeExecutor) Return(refId string, series TimeSeriesSlice) { + e.results[refId] = &QueryResult{ + RefId: refId, 
Series: series, + } +} + +func (e *FakeExecutor) HandleQuery(refId string, fn ResultsFn) { + e.resultsFn[refId] = fn +} diff --git a/pkg/tsdb/graphite/graphite.go b/pkg/tsdb/graphite/graphite.go new file mode 100644 index 00000000000..9a5d05ab3b2 --- /dev/null +++ b/pkg/tsdb/graphite/graphite.go @@ -0,0 +1,81 @@ +package graphite + +import ( + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + "time" + + "github.com/Unknwon/log" + "github.com/grafana/grafana/pkg/components/simplejson" + "github.com/grafana/grafana/pkg/tsdb" +) + +type GraphiteExecutor struct { + *tsdb.DataSourceInfo +} + +func NewGraphiteExecutor(dsInfo *tsdb.DataSourceInfo) tsdb.Executor { + return &GraphiteExecutor{dsInfo} +} + +func init() { + tsdb.RegisterExecutor("graphite", NewGraphiteExecutor) +} + +func (e *GraphiteExecutor) Execute(queries tsdb.QuerySlice, context *tsdb.QueryContext) *tsdb.BatchResult { + result := &tsdb.BatchResult{} + + params := url.Values{ + "from": []string{context.TimeRange.From}, + "until": []string{context.TimeRange.To}, + "format": []string{"json"}, + "maxDataPoints": []string{"500"}, + } + + for _, query := range queries { + params["target"] = []string{ + getTargetFromQuery(query.Query), + } + } + + client := http.Client{Timeout: time.Duration(10 * time.Second)} + res, err := client.PostForm(e.Url+"/render?", params) + if err != nil { + result.Error = err + return result + } + defer res.Body.Close() + + body, err := ioutil.ReadAll(res.Body) + if err != nil { + result.Error = err + return result + } + + var data []TargetResponseDTO + err = json.Unmarshal(body, &data) + if err != nil { + log.Info("Error: %v", string(body)) + result.Error = err + return result + } + + result.QueryResults = make(map[string]*tsdb.QueryResult) + queryRes := &tsdb.QueryResult{} + for _, series := range data { + queryRes.Series = append(queryRes.Series, &tsdb.TimeSeries{ + Name: series.Target, + Points: series.DataPoints, + }) + } + + result.QueryResults["A"] = queryRes + return result +} + +func getTargetFromQuery(query string) string { + json, _ := simplejson.NewJson([]byte(query)) + return json.Get("target").MustString() +} diff --git a/pkg/tsdb/graphite/graphite_test.go b/pkg/tsdb/graphite/graphite_test.go new file mode 100644 index 00000000000..927c2996e24 --- /dev/null +++ b/pkg/tsdb/graphite/graphite_test.go @@ -0,0 +1,31 @@ +package graphite + +import ( + "testing" + + . 
"github.com/smartystreets/goconvey/convey" + + "github.com/grafana/grafana/pkg/tsdb" +) + +func TestGraphite(t *testing.T) { + + Convey("When executing graphite query", t, func() { + executor := NewGraphiteExecutor(&tsdb.DataSourceInfo{ + Url: "http://localhost:8080", + }) + + queries := tsdb.QuerySlice{ + &tsdb.Query{Query: "apps.backend.*.counters.requests.count"}, + } + context := tsdb.NewQueryContext(queries, tsdb.TimeRange{}) + + result := executor.Execute(queries, context) + So(result.Error, ShouldBeNil) + + Convey("Should return series", func() { + So(result.QueryResults, ShouldNotBeEmpty) + }) + }) + +} diff --git a/pkg/tsdb/graphite/types.go b/pkg/tsdb/graphite/types.go new file mode 100644 index 00000000000..4cd1b601bbc --- /dev/null +++ b/pkg/tsdb/graphite/types.go @@ -0,0 +1,6 @@ +package graphite + +type TargetResponseDTO struct { + Target string `json:"target"` + DataPoints [][2]float64 `json:"datapoints"` +} diff --git a/pkg/tsdb/models.go b/pkg/tsdb/models.go index e47d49ce6cf..29e0ff2cd32 100644 --- a/pkg/tsdb/models.go +++ b/pkg/tsdb/models.go @@ -1,10 +1,8 @@ package tsdb -import "time" - type TimeRange struct { - From time.Time - To time.Time + From string + To string } type Request struct { @@ -21,7 +19,7 @@ type Response struct { type DataSourceInfo struct { Id int64 Name string - Type string + PluginId string Url string Password string User string diff --git a/pkg/tsdb/request.go b/pkg/tsdb/request.go index 3e7654bb958..2c96a3ff3ce 100644 --- a/pkg/tsdb/request.go +++ b/pkg/tsdb/request.go @@ -27,6 +27,10 @@ func HandleRequest(req *Request) (*Response, error) { response.BatchTimings = append(response.BatchTimings, batchResult.Timings) + if batchResult.Error != nil { + return nil, batchResult.Error + } + for refId, result := range batchResult.QueryResults { context.Results[refId] = result } diff --git a/pkg/tsdb/tsdb_test.go b/pkg/tsdb/tsdb_test.go new file mode 100644 index 00000000000..7467255882d --- /dev/null +++ b/pkg/tsdb/tsdb_test.go @@ -0,0 +1,177 @@ +package tsdb + +import ( + "testing" + "time" + + . 
"github.com/smartystreets/goconvey/convey" +) + +func TestMetricQuery(t *testing.T) { + + Convey("When batches groups for query", t, func() { + + Convey("Given 3 queries for 2 data sources", func() { + request := &Request{ + Queries: QuerySlice{ + {RefId: "A", Query: "asd", DataSource: &DataSourceInfo{Id: 1}}, + {RefId: "B", Query: "asd", DataSource: &DataSourceInfo{Id: 1}}, + {RefId: "C", Query: "asd", DataSource: &DataSourceInfo{Id: 2}}, + }, + } + + batches, err := getBatches(request) + So(err, ShouldBeNil) + + Convey("Should group into two batches", func() { + So(len(batches), ShouldEqual, 2) + }) + }) + + Convey("Given query 2 depends on query 1", func() { + request := &Request{ + Queries: QuerySlice{ + {RefId: "A", Query: "asd", DataSource: &DataSourceInfo{Id: 1}}, + {RefId: "B", Query: "asd", DataSource: &DataSourceInfo{Id: 2}}, + {RefId: "C", Query: "#A / #B", DataSource: &DataSourceInfo{Id: 3}, Depends: []string{"A", "B"}}, + }, + } + + batches, err := getBatches(request) + So(err, ShouldBeNil) + + Convey("Should return three batch groups", func() { + So(len(batches), ShouldEqual, 3) + }) + + Convey("Group 3 should have group 1 and 2 as dependencies", func() { + So(batches[2].Depends["A"], ShouldEqual, true) + So(batches[2].Depends["B"], ShouldEqual, true) + }) + + }) + }) + + Convey("When executing request with one query", t, func() { + req := &Request{ + Queries: QuerySlice{ + {RefId: "A", Query: "asd", DataSource: &DataSourceInfo{Id: 1, Type: "test"}}, + }, + } + + fakeExecutor := registerFakeExecutor() + fakeExecutor.Return("A", TimeSeriesSlice{&TimeSeries{Name: "argh"}}) + + res, err := HandleRequest(req) + So(err, ShouldBeNil) + + Convey("Should return query results", func() { + So(res.Results["A"].Series, ShouldNotBeEmpty) + So(res.Results["A"].Series[0].Name, ShouldEqual, "argh") + }) + }) + + Convey("When executing one request with two queries from same data source", t, func() { + req := &Request{ + Queries: QuerySlice{ + {RefId: "A", Query: "asd", DataSource: &DataSourceInfo{Id: 1, Type: "test"}}, + {RefId: "B", Query: "asd", DataSource: &DataSourceInfo{Id: 1, Type: "test"}}, + }, + } + + fakeExecutor := registerFakeExecutor() + fakeExecutor.Return("A", TimeSeriesSlice{&TimeSeries{Name: "argh"}}) + fakeExecutor.Return("B", TimeSeriesSlice{&TimeSeries{Name: "barg"}}) + + res, err := HandleRequest(req) + So(err, ShouldBeNil) + + Convey("Should return query results", func() { + So(len(res.Results), ShouldEqual, 2) + So(res.Results["B"].Series[0].Name, ShouldEqual, "barg") + }) + + Convey("Should have been batched in one request", func() { + So(len(res.BatchTimings), ShouldEqual, 1) + }) + + }) + + Convey("When executing one request with three queries from different datasources", t, func() { + req := &Request{ + Queries: QuerySlice{ + {RefId: "A", Query: "asd", DataSource: &DataSourceInfo{Id: 1, Type: "test"}}, + {RefId: "B", Query: "asd", DataSource: &DataSourceInfo{Id: 1, Type: "test"}}, + {RefId: "C", Query: "asd", DataSource: &DataSourceInfo{Id: 2, Type: "test"}}, + }, + } + + res, err := HandleRequest(req) + So(err, ShouldBeNil) + + Convey("Should have been batched in two requests", func() { + So(len(res.BatchTimings), ShouldEqual, 2) + }) + }) + + Convey("When query uses data source of unknown type", t, func() { + req := &Request{ + Queries: QuerySlice{ + {RefId: "A", Query: "asd", DataSource: &DataSourceInfo{Id: 1, Type: "asdasdas"}}, + }, + } + + res, err := HandleRequest(req) + So(err, ShouldBeNil) + + Convey("Should return error", func() { + 
So(res.Results["A"].Error.Error(), ShouldContainSubstring, "not find") + }) + }) + + Convey("When executing request that depend on other query", t, func() { + req := &Request{ + Queries: QuerySlice{ + {RefId: "A", Query: "asd", DataSource: &DataSourceInfo{Id: 1, Type: "test"}}, + {RefId: "B", Query: "#A / 2", DataSource: &DataSourceInfo{Id: 2, Type: "test"}, + Depends: []string{"A"}, + }, + }, + } + + fakeExecutor := registerFakeExecutor() + fakeExecutor.HandleQuery("A", func(c *QueryContext) *QueryResult { + time.Sleep(10 * time.Millisecond) + return &QueryResult{ + Series: TimeSeriesSlice{ + &TimeSeries{Name: "Ares"}, + }} + }) + fakeExecutor.HandleQuery("B", func(c *QueryContext) *QueryResult { + return &QueryResult{ + Series: TimeSeriesSlice{ + &TimeSeries{Name: "Bres+" + c.Results["A"].Series[0].Name}, + }} + }) + + res, err := HandleRequest(req) + So(err, ShouldBeNil) + + Convey("Should have been batched in two requests", func() { + So(len(res.BatchTimings), ShouldEqual, 2) + }) + + Convey("Query B should have access to Query A results", func() { + So(res.Results["B"].Series[0].Name, ShouldEqual, "Bres+Ares") + }) + }) +} + +func registerFakeExecutor() *FakeExecutor { + executor := NewFakeExecutor(nil) + RegisterExecutor("test", func(dsInfo *DataSourceInfo) Executor { + return executor + }) + + return executor +} diff --git a/public/app/features/alerting/partials/alert_list.html b/public/app/features/alerting/partials/alert_list.html index f99df0c0a7b..ce0b6c4b6bd 100644 --- a/public/app/features/alerting/partials/alert_list.html +++ b/public/app/features/alerting/partials/alert_list.html @@ -21,7 +21,7 @@ - {{alert.title}} + {{alert.name}} diff --git a/public/app/plugins/panel/graph/alert_tab_ctrl.ts b/public/app/plugins/panel/graph/alert_tab_ctrl.ts index 8e623e6f4aa..11aebe7e4e7 100644 --- a/public/app/plugins/panel/graph/alert_tab_ctrl.ts +++ b/public/app/plugins/panel/graph/alert_tab_ctrl.ts @@ -29,8 +29,8 @@ export class AlertTabCtrl { _.defaults(this.panel.alerting, this.defaultValues); - var defaultTitle = (this.panelCtrl.dashboard.title + ' ' + this.panel.title + ' alert'); - this.panel.alerting.title = this.panel.alerting.title || defaultTitle; + var defaultName = (this.panelCtrl.dashboard.title + ' ' + this.panel.title + ' alert'); + this.panel.alerting.name = this.panel.alerting.name || defaultName; this.panel.targets.map(target => { this.metricTargets.push(target); diff --git a/public/app/plugins/panel/graph/partials/tab_alerting.html b/public/app/plugins/panel/graph/partials/tab_alerting.html index 8055ef2a883..aeafebf2f83 100644 --- a/public/app/plugins/panel/graph/partials/tab_alerting.html +++ b/public/app/plugins/panel/graph/partials/tab_alerting.html @@ -60,7 +60,7 @@
[tab_alerting.html hunk body lost in extraction: only the visible text survives ("Alert info" heading, "Alert name" label); the markup of the changed lines was stripped. Consistent with the alert_tab_ctrl.ts change above, the hunk switches the alert-name input from the old alerting.title binding to alerting.name.]
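Taken together, the tsdb changes above add an executor registry keyed by the datasource plugin id, a graphite executor that registers itself in init(), and a HandleRequest entry point that batches queries per datasource. The sketch below is not part of the patch; it strings those pieces together the way executor.go and the new tests do. The datasource id, URL, and graphite target are placeholder values, and the query body is the JSON form that the graphite executor's getTargetFromQuery expects.

```go
package main

import (
	"fmt"
	"strconv"

	"github.com/grafana/grafana/pkg/tsdb"
	// Blank import registers the "graphite" executor in its init(),
	// the same way pkg/services/alerting/alerting.go now pulls it in.
	_ "github.com/grafana/grafana/pkg/tsdb/graphite"
)

func main() {
	// Mirror ExecutorImpl.GetRequestForAlertRule: a relative time range plus
	// one query that carries its datasource info.
	queryRange := 3600 // seconds
	req := &tsdb.Request{
		TimeRange: tsdb.TimeRange{
			From: "-" + strconv.Itoa(queryRange) + "s",
			To:   "now",
		},
		Queries: tsdb.QuerySlice{
			&tsdb.Query{
				RefId: "A",
				// The graphite executor pulls "target" out of this JSON blob.
				Query: `{"target": "apps.backend.*.counters.requests.count"}`,
				DataSource: &tsdb.DataSourceInfo{
					Id:       1,
					Name:     "graphite",
					PluginId: "graphite", // executors are now looked up by PluginId
					Url:      "http://localhost:8080",
				},
			},
		},
	}

	resp, err := tsdb.HandleRequest(req)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	for refId, result := range resp.Results {
		fmt.Printf("%s: %d series\n", refId, len(result.Series))
	}
}
```

The alert executor follows this same path, then feeds the returned TimeSeriesSlice into evaluateRule to decide between the Ok, Warn, and Critical states.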