Alerting: Update external alert manager notifier (#103530)

Alexander Akhmetov
2025-04-07 17:09:23 +02:00
committed by GitHub
parent b9bc069fb9
commit 09a7f9ba1c
5 changed files with 1446 additions and 127 deletions

go.mod (4 changes)

@ -197,6 +197,7 @@ require (
google.golang.org/protobuf v1.36.6 // @grafana/plugins-platform-backend
gopkg.in/ini.v1 v1.67.0 // @grafana/alerting-backend
gopkg.in/mail.v2 v2.3.1 // @grafana/grafana-backend-group
gopkg.in/yaml.v2 v2.4.0 // @grafana/alerting-backend
gopkg.in/yaml.v3 v3.0.1 // @grafana/alerting-backend
k8s.io/api v0.32.3 // @grafana/grafana-app-platform-squad
k8s.io/apimachinery v0.32.3 // @grafana/grafana-app-platform-squad
@ -493,7 +494,7 @@ require (
github.com/prometheus/common/sigv4 v0.1.0 // indirect
github.com/prometheus/exporter-toolkit v0.13.2 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/prometheus/sigv4 v0.1.0 // indirect
github.com/prometheus/sigv4 v0.1.0 // @grafana/alerting-backend
github.com/protocolbuffers/txtpbfmt v0.0.0-20241112170944-20d2c9ebc01d // indirect
github.com/puzpuzpuz/xsync/v2 v2.5.1 // indirect
github.com/redis/rueidis v1.0.53 // indirect
@ -569,7 +570,6 @@ require (
gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect
gopkg.in/src-d/go-errors.v1 v1.0.0 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
k8s.io/apiextensions-apiserver v0.32.3 // indirect
k8s.io/kms v0.32.3 // indirect
modernc.org/libc v1.61.13 // indirect


@ -1,6 +1,6 @@
// THIS FILE IS COPIED FROM UPSTREAM
//
// https://github.com/prometheus/prometheus/blob/edfc3bcd025dd6fe296c167a14a216cab1e552ee/notifier/notifier.go
// https://github.com/prometheus/prometheus/blob/293f0c9185260165fd7dabbf8a9e8758b32abeae/notifier/notifier.go
//
// Copyright 2013 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
@ -15,26 +15,30 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// nolint
//nolint:all
package sender
import (
"context"
"crypto/md5"
"encoding/hex"
"fmt"
"log/slog"
"net/http"
"net/url"
"path"
"sync"
"time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/go-openapi/strfmt"
"github.com/prometheus/alertmanager/api/v2/models"
"github.com/prometheus/client_golang/prometheus"
config_util "github.com/prometheus/common/config"
"github.com/prometheus/common/model"
"github.com/prometheus/common/promslog"
"github.com/prometheus/common/version"
"github.com/prometheus/sigv4"
"gopkg.in/yaml.v2"
"github.com/prometheus/prometheus/config"
"github.com/prometheus/prometheus/discovery/targetgroup"
@ -53,7 +57,7 @@ const (
alertmanagerLabel = "alertmanager"
)
var userAgent = fmt.Sprintf("Prometheus/%s", version.Version)
var userAgent = version.PrometheusUserAgent()
// Alert is a generic representation of an alert in the Prometheus eco-system.
type Alert struct {
@ -110,20 +114,22 @@ type Manager struct {
metrics *alertMetrics
more chan struct{}
mtx sync.RWMutex
ctx context.Context
cancel func()
more chan struct{}
mtx sync.RWMutex
stopOnce *sync.Once
stopRequested chan struct{}
alertmanagers map[string]*alertmanagerSet
logger log.Logger
logger *slog.Logger
}
// Options are the configurable parameters of a Handler.
type Options struct {
QueueCapacity int
ExternalLabels labels.Labels
RelabelConfigs []*relabel.Config
QueueCapacity int
DrainOnShutdown bool
ExternalLabels labels.Labels
RelabelConfigs []*relabel.Config
// Used for sending HTTP requests to the Alertmanager.
Do func(ctx context.Context, client *http.Client, req *http.Request) (*http.Response, error)
@ -155,7 +161,7 @@ func newAlertMetrics(r prometheus.Registerer, queueCap int, queueLen, alertmanag
Namespace: namespace,
Subsystem: subsystem,
Name: "errors_total",
Help: "Total number of errors sending alert notifications.",
Help: "Total number of sent alerts affected by errors.",
},
[]string{alertmanagerLabel},
),
@ -216,23 +222,21 @@ func do(ctx context.Context, client *http.Client, req *http.Request) (*http.Resp
}
// NewManager is the manager constructor.
func NewManager(o *Options, logger log.Logger) *Manager {
ctx, cancel := context.WithCancel(context.Background())
func NewManager(o *Options, logger *slog.Logger) *Manager {
if o.Do == nil {
o.Do = do
}
if logger == nil {
logger = log.NewNopLogger()
logger = promslog.NewNopLogger()
}
n := &Manager{
queue: make([]*Alert, 0, o.QueueCapacity),
ctx: ctx,
cancel: cancel,
more: make(chan struct{}, 1),
opts: o,
logger: logger,
queue: make([]*Alert, 0, o.QueueCapacity),
more: make(chan struct{}, 1),
stopRequested: make(chan struct{}),
stopOnce: &sync.Once{},
opts: o,
logger: logger,
}
queueLenFunc := func() float64 { return float64(n.queueLen()) }
@ -274,38 +278,100 @@ func (n *Manager) nextBatch() []*Alert {
return alerts
}
// Run dispatches notifications continuously.
// Run dispatches notifications continuously, returning once Stop has been called and all
// pending notifications have been drained from the queue (if draining is enabled).
//
// Dispatching of notifications occurs in parallel to processing target updates to avoid one starving the other.
// Refer to https://github.com/prometheus/prometheus/issues/13676 for more details.
func (n *Manager) Run(tsets <-chan map[string][]*targetgroup.Group) {
wg := sync.WaitGroup{}
wg.Add(2)
go func() {
defer wg.Done()
n.targetUpdateLoop(tsets)
}()
go func() {
defer wg.Done()
n.sendLoop()
n.drainQueue()
}()
wg.Wait()
n.logger.Info("Notification manager stopped")
}
// sendLoop continuously consumes the notifications queue and sends alerts to
// the configured Alertmanagers.
func (n *Manager) sendLoop() {
for {
// The select is split in two parts, such as we will first try to read
// new alertmanager targets if they are available, before sending new
// alerts.
// If we've been asked to stop, that takes priority over sending any further notifications.
select {
case <-n.ctx.Done():
case <-n.stopRequested:
return
case ts := <-tsets:
n.reload(ts)
default:
select {
case <-n.ctx.Done():
case <-n.stopRequested:
return
case <-n.more:
n.sendOneBatch()
// If the queue still has items left, kick off the next iteration.
if n.queueLen() > 0 {
n.setMore()
}
}
}
}
}
// targetUpdateLoop receives updates of target groups and triggers a reload.
func (n *Manager) targetUpdateLoop(tsets <-chan map[string][]*targetgroup.Group) {
for {
// If we've been asked to stop, that takes priority over processing any further target group updates.
select {
case <-n.stopRequested:
return
default:
select {
case <-n.stopRequested:
return
case ts := <-tsets:
n.reload(ts)
case <-n.more:
}
}
alerts := n.nextBatch()
if !n.sendAll(alerts...) {
n.metrics.dropped.Add(float64(len(alerts)))
}
// If the queue still has items left, kick off the next iteration.
if n.queueLen() > 0 {
n.setMore()
}
}
}
func (n *Manager) sendOneBatch() {
alerts := n.nextBatch()
if !n.sendAll(alerts...) {
n.metrics.dropped.Add(float64(len(alerts)))
}
}
func (n *Manager) drainQueue() {
if !n.opts.DrainOnShutdown {
if n.queueLen() > 0 {
n.logger.Warn("Draining remaining notifications on shutdown is disabled, and some notifications have been dropped", "count", n.queueLen())
n.metrics.dropped.Add(float64(n.queueLen()))
}
return
}
n.logger.Info("Draining any remaining notifications...")
for n.queueLen() > 0 {
n.sendOneBatch()
}
n.logger.Info("Remaining notifications drained")
}
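
A minimal, self-contained sketch of the shutdown pattern introduced above: a stopRequested channel closed exactly once via sync.Once, a send loop that prefers stopping over sending, and an optional drain of whatever is still queued. All names below are illustrative; this is not the upstream code.

package main

import (
	"fmt"
	"sync"
	"time"
)

// sender is a toy stand-in for the notification Manager.
type sender struct {
	mtx             sync.Mutex
	queue           []string
	more            chan struct{}
	stopRequested   chan struct{}
	stopOnce        sync.Once
	drainOnShutdown bool
}

func newSender(drain bool) *sender {
	return &sender{
		more:            make(chan struct{}, 1),
		stopRequested:   make(chan struct{}),
		drainOnShutdown: drain,
	}
}

// send enqueues items and signals the send loop.
func (s *sender) send(items ...string) {
	s.mtx.Lock()
	s.queue = append(s.queue, items...)
	s.mtx.Unlock()
	s.setMore()
}

// setMore signals that the queue is non-empty, without ever blocking.
func (s *sender) setMore() {
	select {
	case s.more <- struct{}{}:
	default:
	}
}

func (s *sender) queueLen() int {
	s.mtx.Lock()
	defer s.mtx.Unlock()
	return len(s.queue)
}

func (s *sender) sendOneBatch() {
	s.mtx.Lock()
	batch := s.queue
	s.queue = nil
	s.mtx.Unlock()
	fmt.Println("sent:", batch)
}

// run consumes the queue until stop is called; stopping takes priority
// over sending further batches, and the queue is drained if enabled.
func (s *sender) run() {
	for {
		select {
		case <-s.stopRequested:
			s.drainQueue()
			return
		case <-s.more:
			s.sendOneBatch()
			if s.queueLen() > 0 {
				s.setMore()
			}
		}
	}
}

func (s *sender) drainQueue() {
	if !s.drainOnShutdown {
		return // anything left in the queue is dropped
	}
	for s.queueLen() > 0 {
		s.sendOneBatch()
	}
}

// stop is safe to call multiple times; only the first call closes the channel.
func (s *sender) stop() {
	s.stopOnce.Do(func() { close(s.stopRequested) })
}

func main() {
	s := newSender(true)
	done := make(chan struct{})
	go func() { s.run(); close(done) }()
	s.send("alert-1", "alert-2")
	time.Sleep(10 * time.Millisecond)
	s.stop()
	<-done
}

The real Run does the same thing with two goroutines (sendLoop and targetUpdateLoop) so that target group updates cannot starve notification sending.
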
func (n *Manager) reload(tgs map[string][]*targetgroup.Group) {
n.mtx.Lock()
defer n.mtx.Unlock()
@ -313,7 +379,7 @@ func (n *Manager) reload(tgs map[string][]*targetgroup.Group) {
for id, tgroup := range tgs {
am, ok := n.alertmanagers[id]
if !ok {
level.Error(n.logger).Log("msg", "couldn't sync alert manager set", "err", fmt.Sprintf("invalid id:%v", id))
n.logger.Error("couldn't sync alert manager set", "err", fmt.Sprintf("invalid id:%v", id))
continue
}
am.sync(tgroup)
@ -326,20 +392,7 @@ func (n *Manager) Send(alerts ...*Alert) {
n.mtx.Lock()
defer n.mtx.Unlock()
// Attach external labels before relabelling and sending.
for _, a := range alerts {
lb := labels.NewBuilder(a.Labels)
n.opts.ExternalLabels.Range(func(l labels.Label) {
if a.Labels.Get(l.Name) == "" {
lb.Set(l.Name, l.Value)
}
})
a.Labels = lb.Labels()
}
alerts = n.relabelAlerts(alerts)
alerts = relabelAlerts(n.opts.RelabelConfigs, n.opts.ExternalLabels, alerts)
if len(alerts) == 0 {
return
}
@ -349,7 +402,7 @@ func (n *Manager) Send(alerts ...*Alert) {
if d := len(alerts) - n.opts.QueueCapacity; d > 0 {
alerts = alerts[d:]
level.Warn(n.logger).Log("msg", "Alert batch larger than queue capacity, dropping alerts", "num_dropped", d)
n.logger.Warn("Alert batch larger than queue capacity, dropping alerts", "num_dropped", d)
n.metrics.dropped.Add(float64(d))
}
@ -358,7 +411,7 @@ func (n *Manager) Send(alerts ...*Alert) {
if d := (len(n.queue) + len(alerts)) - n.opts.QueueCapacity; d > 0 {
n.queue = n.queue[d:]
level.Warn(n.logger).Log("msg", "Alert notification queue full, dropping alerts", "num_dropped", d)
n.logger.Warn("Alert notification queue full, dropping alerts", "num_dropped", d)
n.metrics.dropped.Add(float64(d))
}
n.queue = append(n.queue, alerts...)
@ -367,15 +420,24 @@ func (n *Manager) Send(alerts ...*Alert) {
n.setMore()
}
func (n *Manager) relabelAlerts(alerts []*Alert) []*Alert {
func relabelAlerts(relabelConfigs []*relabel.Config, externalLabels labels.Labels, alerts []*Alert) []*Alert {
lb := labels.NewBuilder(labels.EmptyLabels())
var relabeledAlerts []*Alert
for _, alert := range alerts {
labels, keep := relabel.Process(alert.Labels, n.opts.RelabelConfigs...)
if keep {
alert.Labels = labels
relabeledAlerts = append(relabeledAlerts, alert)
for _, a := range alerts {
lb.Reset(a.Labels)
externalLabels.Range(func(l labels.Label) {
if a.Labels.Get(l.Name) == "" {
lb.Set(l.Name, l.Value)
}
})
keep := relabel.ProcessBuilder(lb, relabelConfigs...)
if !keep {
continue
}
a.Labels = lb.Labels()
relabeledAlerts = append(relabeledAlerts, a)
}
return relabeledAlerts
}
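
For context, an illustrative sketch of the single-pass approach the new relabelAlerts takes: external labels are attached only where the alert does not already define them, and the relabel rules then run against the same labels.Builder. The drop rule below is an assumed example and is not part of the commit.

package main

import (
	"fmt"

	"github.com/prometheus/common/model"
	"github.com/prometheus/prometheus/model/labels"
	"github.com/prometheus/prometheus/model/relabel"
)

func main() {
	external := labels.FromStrings("cluster", "prod")
	alertLabels := labels.FromStrings("alertname", "HighLatency", "severity", "page")

	// Assumed rule: drop alerts whose severity is "debug".
	drop := &relabel.Config{
		SourceLabels: model.LabelNames{"severity"},
		Regex:        relabel.MustNewRegexp("debug"),
		Action:       relabel.Drop,
	}

	lb := labels.NewBuilder(labels.EmptyLabels())
	lb.Reset(alertLabels)
	// External labels only fill gaps; alert labels win.
	external.Range(func(l labels.Label) {
		if alertLabels.Get(l.Name) == "" {
			lb.Set(l.Name, l.Value)
		}
	})
	if keep := relabel.ProcessBuilder(lb, drop); keep {
		fmt.Println(lb.Labels()) // {alertname="HighLatency", cluster="prod", severity="page"}
	}
}

Because lb.Labels() is only called for alerts that survive relabelling, dropped alerts never materialise an intermediate label set.
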
@ -456,10 +518,19 @@ func labelsToOpenAPILabelSet(modelLabelSet labels.Labels) models.LabelSet {
return apiLabelSet
}
// Stop shuts down the notification handler.
// Stop signals the notification manager to shut down and immediately returns.
//
// Run will return once the notification manager has successfully shut down.
//
// The manager will optionally drain any queued notifications before shutting down.
//
// Stop is safe to call multiple times.
func (n *Manager) Stop() {
level.Info(n.logger).Log("msg", "Stopping notification manager...")
n.cancel()
n.logger.Info("Stopping notification manager...")
n.stopOnce.Do(func() {
close(n.stopRequested)
})
}
// Alertmanager holds Alertmanager endpoint information.
@ -479,11 +550,22 @@ func (a alertmanagerLabels) url() *url.URL {
}
}
func newAlertmanagerSet(cfg *config.AlertmanagerConfig, logger log.Logger, metrics *alertMetrics) (*alertmanagerSet, error) {
func newAlertmanagerSet(cfg *config.AlertmanagerConfig, logger *slog.Logger, metrics *alertMetrics) (*alertmanagerSet, error) {
client, err := config_util.NewClientFromConfig(cfg.HTTPClientConfig, "alertmanager")
if err != nil {
return nil, err
}
t := client.Transport
if cfg.SigV4Config != nil {
t, err = sigv4.NewSigV4RoundTripper(cfg.SigV4Config, client.Transport)
if err != nil {
return nil, err
}
}
client.Transport = t
s := &alertmanagerSet{
client: client,
cfg: cfg,
@ -502,7 +584,7 @@ func (s *alertmanagerSet) sync(tgs []*targetgroup.Group) {
for _, tg := range tgs {
ams, droppedAms, err := AlertmanagerFromGroup(tg, s.cfg)
if err != nil {
level.Error(s.logger).Log("msg", "Creating discovered Alertmanagers failed", "err", err)
s.logger.Error("Creating discovered Alertmanagers failed", "err", err)
continue
}
allAms = append(allAms, ams...)
@ -511,6 +593,7 @@ func (s *alertmanagerSet) sync(tgs []*targetgroup.Group) {
s.mtx.Lock()
defer s.mtx.Unlock()
previousAms := s.ams
// Set new Alertmanagers and deduplicate them along their unique URL.
s.ams = []alertmanager{}
s.droppedAms = []alertmanager{}
@ -530,6 +613,26 @@ func (s *alertmanagerSet) sync(tgs []*targetgroup.Group) {
seen[us] = struct{}{}
s.ams = append(s.ams, am)
}
// Now remove counters for any removed Alertmanagers.
for _, am := range previousAms {
us := am.url().String()
if _, ok := seen[us]; ok {
continue
}
s.metrics.latency.DeleteLabelValues(us)
s.metrics.sent.DeleteLabelValues(us)
s.metrics.errors.DeleteLabelValues(us)
seen[us] = struct{}{}
}
}
func (s *alertmanagerSet) configHash() (string, error) {
b, err := yaml.Marshal(s.cfg)
if err != nil {
return "", err
}
hash := md5.Sum(b)
return hex.EncodeToString(hash[:]), nil
}
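
The configHash helper above hashes the YAML-marshalled Alertmanager configuration with MD5; ApplyConfig (later in this diff) uses that hash to match new alertmanager sets against old ones, so already-discovered Alertmanagers can be re-used without waiting for service discovery. A small standalone sketch of the same idea, using an assumed stand-in type rather than the real config.AlertmanagerConfig:

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"

	"gopkg.in/yaml.v2"
)

// amConfig is a stand-in for illustration only.
type amConfig struct {
	Scheme  string `yaml:"scheme"`
	Timeout string `yaml:"timeout"`
}

func configHash(cfg amConfig) (string, error) {
	b, err := yaml.Marshal(cfg)
	if err != nil {
		return "", err
	}
	sum := md5.Sum(b)
	return hex.EncodeToString(sum[:]), nil
}

func main() {
	old := amConfig{Scheme: "https", Timeout: "10s"}
	next := amConfig{Scheme: "https", Timeout: "10s"}
	ha, _ := configHash(old)
	hb, _ := configHash(next)
	fmt.Println(ha == hb) // true: identical configs map to the same key on reload
}

This is also why gopkg.in/yaml.v2 moves from an indirect to a direct dependency in go.mod.
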
func postPath(pre string, v config.AlertmanagerAPIVersion) string {
@ -540,38 +643,40 @@ func postPath(pre string, v config.AlertmanagerAPIVersion) string {
// AlertmanagerFromGroup extracts a list of alertmanagers from a target group
// and an associated AlertmanagerConfig.
func AlertmanagerFromGroup(tg *targetgroup.Group, cfg *config.AlertmanagerConfig) ([]alertmanager, []alertmanager, error) {
res := make([]alertmanager, 0, len(tg.Targets))
var res []alertmanager
var droppedAlertManagers []alertmanager
lb := labels.NewBuilder(labels.EmptyLabels())
for _, tlset := range tg.Targets {
lbls := make([]labels.Label, 0, len(tlset)+2+len(tg.Labels))
lb.Reset(labels.EmptyLabels())
for ln, lv := range tlset {
lbls = append(lbls, labels.Label{Name: string(ln), Value: string(lv)})
lb.Set(string(ln), string(lv))
}
// Set configured scheme as the initial scheme label for overwrite.
lbls = append(lbls, labels.Label{Name: model.SchemeLabel, Value: cfg.Scheme})
lbls = append(lbls, labels.Label{Name: pathLabel, Value: postPath(cfg.PathPrefix, cfg.APIVersion)})
lb.Set(model.SchemeLabel, cfg.Scheme)
lb.Set(pathLabel, postPath(cfg.PathPrefix, cfg.APIVersion))
// Combine target labels with target group labels.
for ln, lv := range tg.Labels {
if _, ok := tlset[ln]; !ok {
lbls = append(lbls, labels.Label{Name: string(ln), Value: string(lv)})
lb.Set(string(ln), string(lv))
}
}
lset, keep := relabel.Process(labels.New(lbls...), cfg.RelabelConfigs...)
preRelabel := lb.Labels()
keep := relabel.ProcessBuilder(lb, cfg.RelabelConfigs...)
if !keep {
droppedAlertManagers = append(droppedAlertManagers, alertmanagerLabels{labels.New(lbls...)})
droppedAlertManagers = append(droppedAlertManagers, alertmanagerLabels{preRelabel})
continue
}
addr := lset.Get(model.AddressLabel)
addr := lb.Get(model.AddressLabel)
if err := config.CheckTargetAddress(model.LabelValue(addr)); err != nil {
return nil, nil, err
}
res = append(res, alertmanagerLabels{lset})
res = append(res, alertmanagerLabels{lb.Labels()})
}
return res, droppedAlertManagers, nil
}
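
newAlertmanagerSet above now optionally wraps the HTTP client's transport with an AWS SigV4 signing round-tripper, which is also why github.com/prometheus/sigv4 is promoted to an owned dependency in go.mod. A hedged sketch of that wiring in isolation, assuming credentials come from the default AWS credential chain (the region value is an example):

package main

import (
	"fmt"
	"net/http"

	"github.com/prometheus/sigv4"
)

func main() {
	// Region-only config; other fields (access key, profile, role ARN) are omitted here.
	cfg := &sigv4.SigV4Config{Region: "us-east-1"}

	rt, err := sigv4.NewSigV4RoundTripper(cfg, http.DefaultTransport)
	if err != nil {
		panic(err)
	}

	// Requests sent through this client are SigV4-signed before they leave.
	client := &http.Client{Transport: rt}
	fmt.Printf("%T\n", client.Transport)
}
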


@ -10,17 +10,15 @@ import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"log/slog"
"net/http"
"sync"
"time"
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/prometheus/config"
"go.uber.org/atomic"
"github.com/prometheus/prometheus/model/labels"
)
// ApplyConfig updates the status state as the new config requires.
@ -33,12 +31,33 @@ func (n *Manager) ApplyConfig(conf *config.Config, headers map[string]http.Heade
n.opts.RelabelConfigs = conf.AlertingConfig.AlertRelabelConfigs
amSets := make(map[string]*alertmanagerSet)
// configToAlertmanagers maps alertmanager sets for each unique AlertmanagerConfig,
// helping to avoid dropping known alertmanagers and re-use them without waiting for SD updates when applying the config.
configToAlertmanagers := make(map[string]*alertmanagerSet, len(n.alertmanagers))
for _, oldAmSet := range n.alertmanagers {
hash, err := oldAmSet.configHash()
if err != nil {
return err
}
configToAlertmanagers[hash] = oldAmSet
}
for k, cfg := range conf.AlertingConfig.AlertmanagerConfigs.ToMap() {
ams, err := newAlertmanagerSet(cfg, n.logger, n.metrics)
if err != nil {
return err
}
hash, err := ams.configHash()
if err != nil {
return err
}
if oldAmSet, ok := configToAlertmanagers[hash]; ok {
ams.ams = oldAmSet.ams
ams.droppedAms = oldAmSet.droppedAms
}
// Extension: set the headers to the alertmanager set.
if headers, ok := headers[k]; ok {
ams.headers = headers
@ -65,11 +84,12 @@ type alertmanagerSet struct {
mtx sync.RWMutex
ams []alertmanager
droppedAms []alertmanager
logger log.Logger
logger *slog.Logger
}
// sendAll sends the alerts to all configured Alertmanagers concurrently.
// It returns true if the alerts could be sent successfully to at least one Alertmanager.
// Extension: passing headers from each ams to sendOne
func (n *Manager) sendAll(alerts ...*Alert) bool {
if len(alerts) == 0 {
return true
@ -77,60 +97,63 @@ func (n *Manager) sendAll(alerts ...*Alert) bool {
begin := time.Now()
// v1Payload and v2Payload represent 'alerts' marshaled for Alertmanager API
// v1 or v2. Marshaling happens below. Reference here is for caching between
// cachedPayload represent 'alerts' marshaled for Alertmanager API v2.
// Marshaling happens below. Reference here is for caching between
// for loop iterations.
var v1Payload, v2Payload []byte
var cachedPayload []byte
n.mtx.RLock()
amSets := n.alertmanagers
n.mtx.RUnlock()
var (
wg sync.WaitGroup
numSuccess atomic.Uint64
wg sync.WaitGroup
amSetCovered sync.Map
)
for _, ams := range amSets {
for k, ams := range amSets {
var (
payload []byte
err error
payload []byte
err error
amAlerts = alerts
)
ams.mtx.RLock()
switch ams.cfg.APIVersion {
case config.AlertmanagerAPIVersionV1:
{
if v1Payload == nil {
v1Payload, err = json.Marshal(alerts)
if err != nil {
level.Error(n.logger).Log("msg", "Encoding alerts for Alertmanager API v1 failed", "err", err)
ams.mtx.RUnlock()
return false
}
}
if len(ams.ams) == 0 {
ams.mtx.RUnlock()
continue
}
payload = v1Payload
if len(ams.cfg.AlertRelabelConfigs) > 0 {
amAlerts = relabelAlerts(ams.cfg.AlertRelabelConfigs, labels.Labels{}, alerts)
if len(amAlerts) == 0 {
ams.mtx.RUnlock()
continue
}
// We can't use the cached values from previous iteration.
cachedPayload = nil
}
switch ams.cfg.APIVersion {
case config.AlertmanagerAPIVersionV2:
{
if v2Payload == nil {
openAPIAlerts := alertsToOpenAPIAlerts(alerts)
if cachedPayload == nil {
openAPIAlerts := alertsToOpenAPIAlerts(amAlerts)
v2Payload, err = json.Marshal(openAPIAlerts)
cachedPayload, err = json.Marshal(openAPIAlerts)
if err != nil {
level.Error(n.logger).Log("msg", "Encoding alerts for Alertmanager API v2 failed", "err", err)
n.logger.Error("Encoding alerts for Alertmanager API v2 failed", "err", err)
ams.mtx.RUnlock()
return false
}
}
payload = v2Payload
payload = cachedPayload
}
default:
{
level.Error(n.logger).Log(
"msg", fmt.Sprintf("Invalid Alertmanager API version '%v', expected one of '%v'", ams.cfg.APIVersion, config.SupportedAlertmanagerAPIVersions),
n.logger.Error(
fmt.Sprintf("Invalid Alertmanager API version '%v', expected one of '%v'", ams.cfg.APIVersion, config.SupportedAlertmanagerAPIVersions),
"err", err,
)
ams.mtx.RUnlock()
@ -138,26 +161,34 @@ func (n *Manager) sendAll(alerts ...*Alert) bool {
}
}
if len(ams.cfg.AlertRelabelConfigs) > 0 {
// We can't use the cached values on the next iteration.
cachedPayload = nil
}
// Being here means len(ams.ams) > 0
amSetCovered.Store(k, false)
for _, am := range ams.ams {
wg.Add(1)
ctx, cancel := context.WithTimeout(n.ctx, time.Duration(ams.cfg.Timeout))
ctx, cancel := context.WithTimeout(context.Background(), time.Duration(ams.cfg.Timeout))
defer cancel()
// Extension: added headers parameter.
go func(client *http.Client, url string, headers http.Header) {
// Treat cancellations as a success, so that we don't increment error or dropped counters.
if err := n.sendOne(ctx, client, url, payload, headers); err != nil && !errors.Is(err, context.Canceled) {
level.Error(n.logger).Log("alertmanager", url, "count", len(alerts), "msg", "Error sending alert", "err", err)
n.metrics.errors.WithLabelValues(url).Inc()
go func(ctx context.Context, k string, client *http.Client, url string, payload []byte, count int, headers http.Header) {
err := n.sendOne(ctx, client, url, payload, headers)
if err != nil {
n.logger.Error("Error sending alerts", "alertmanager", url, "count", count, "err", err)
n.metrics.errors.WithLabelValues(url).Add(float64(count))
} else {
numSuccess.Inc()
amSetCovered.CompareAndSwap(k, false, true)
}
n.metrics.latency.WithLabelValues(url).Observe(time.Since(begin).Seconds())
n.metrics.sent.WithLabelValues(url).Add(float64(len(alerts)))
n.metrics.sent.WithLabelValues(url).Add(float64(count))
wg.Done()
}(ams.client, am.url().String(), ams.headers)
}(ctx, k, ams.client, am.url().String(), payload, len(amAlerts), ams.headers)
}
ams.mtx.RUnlock()
@ -165,12 +196,23 @@ func (n *Manager) sendAll(alerts ...*Alert) bool {
wg.Wait()
return numSuccess.Load() > 0
// Return false if there are any sets which were attempted (e.g. not filtered
// out) but have no successes.
allAmSetsCovered := true
amSetCovered.Range(func(_, value any) bool {
if !value.(bool) {
allAmSetsCovered = false
return false
}
return true
})
return allAmSetsCovered
}
// Extension: added headers parameter.
func (n *Manager) sendOne(ctx context.Context, c *http.Client, url string, b []byte, headers http.Header) error {
req, err := http.NewRequest("POST", url, bytes.NewReader(b))
req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(b))
if err != nil {
return err
}
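
The reworked sendAll tracks success per Alertmanager set rather than globally: every set that is actually attempted starts as not covered, flips to covered on its first successful delivery, and the call reports success only if every attempted set was covered. A small standalone sketch of that bookkeeping with sync.Map; the map of fake per-set results is invented for illustration:

package main

import (
	"fmt"
	"sync"
)

// sendAll reports true only if every attempted set had at least one success.
func sendAll(results map[string][]bool) bool {
	var (
		wg         sync.WaitGroup
		setCovered sync.Map
	)
	for set, attempts := range results {
		setCovered.Store(set, false) // this set was attempted
		for _, ok := range attempts {
			wg.Add(1)
			go func(set string, ok bool) {
				defer wg.Done()
				if ok {
					// Only the first success per set matters.
					setCovered.CompareAndSwap(set, false, true)
				}
			}(set, ok)
		}
	}
	wg.Wait()

	all := true
	setCovered.Range(func(_, v any) bool {
		if !v.(bool) {
			all = false
			return false // stop iterating
		}
		return true
	})
	return all
}

func main() {
	fmt.Println(sendAll(map[string][]bool{"a": {false, true}, "b": {true}})) // true
	fmt.Println(sendAll(map[string][]bool{"a": {false}, "b": {true}}))       // false
}
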

File diff suppressed because it is too large.


@ -28,6 +28,7 @@ import (
const (
defaultMaxQueueCapacity = 10000
defaultTimeout = 10 * time.Second
defaultDrainOnShutdown = true
)
// ExternalAlertmanager is responsible for dispatching alert notifications to an external Alertmanager service.
@ -106,8 +107,8 @@ func NewExternalAlertmanagerSender(l log.Logger, reg prometheus.Registerer, opts
s.manager = NewManager(
// Injecting a new registry here means these metrics are not exported.
// Once we fix the individual Alertmanager metrics we should fix this scenario too.
&Options{QueueCapacity: defaultMaxQueueCapacity, Registerer: reg},
s.logger,
&Options{QueueCapacity: defaultMaxQueueCapacity, Registerer: reg, DrainOnShutdown: defaultDrainOnShutdown},
toSlogLogger(s.logger),
)
sdMetrics, err := discovery.CreateAndRegisterSDMetrics(prometheus.NewRegistry())
if err != nil {
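
The Grafana-side wrapper now passes toSlogLogger(s.logger) because NewManager takes a *slog.Logger instead of a go-kit logger. The adapter itself is not shown in this excerpt, so the following is only an assumption-labeled sketch of one possible shape for such a bridge, forwarding slog records to a go-kit style key/value logger; it is not Grafana's implementation:

package main

import (
	"context"
	"log/slog"
	"os"

	gokitlog "github.com/go-kit/log"
)

// kvHandler is a minimal slog.Handler that forwards records as key/value
// pairs to a go-kit logger.
type kvHandler struct {
	l     gokitlog.Logger
	attrs []slog.Attr
}

func (h kvHandler) Enabled(context.Context, slog.Level) bool { return true }

func (h kvHandler) Handle(_ context.Context, r slog.Record) error {
	kv := []interface{}{"msg", r.Message, "level", r.Level.String()}
	for _, a := range h.attrs {
		kv = append(kv, a.Key, a.Value.Any())
	}
	r.Attrs(func(a slog.Attr) bool {
		kv = append(kv, a.Key, a.Value.Any())
		return true
	})
	return h.l.Log(kv...)
}

func (h kvHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
	h.attrs = append(append([]slog.Attr{}, h.attrs...), attrs...)
	return h
}

// Groups are flattened here for simplicity.
func (h kvHandler) WithGroup(string) slog.Handler { return h }

func toSlog(l gokitlog.Logger) *slog.Logger {
	return slog.New(kvHandler{l: l})
}

func main() {
	gk := gokitlog.NewLogfmtLogger(os.Stdout)
	logger := toSlog(gk)
	logger.Info("Notification manager stopped", "queue", 0)
}
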