Chore: capitalise logs in other backend packages (#74344)

* capitalise logs in observability logs

* capitalise oss-bit-tent packages

* capitalise logs in aws-datasources

* capitalise logs for traces and profiling

* capitalise logs for partner datasources

* capitalise logs in plugins platform

* capitalise logs for observability metrics

commit 93cdc94a94 (parent baea7a7556)
Author: Serge Zaitsev
Date: 2023-09-04 22:25:43 +02:00
Committed by: GitHub
22 changed files with 47 additions and 47 deletions
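
Every hunk below applies the same convention: the human-readable log message gets a capitalized first letter, while structured keys (such as "error" or "err") and their values are left untouched. A minimal sketch of the convention, using Go's standard log/slog package rather than Grafana's internal logger (the key/value call shape is the same):

package main

import (
	"errors"
	"log/slog"
)

func main() {
	err := errors.New("connection reset by peer")

	// Old style: the message itself starts lowercase.
	slog.Warn("failed to close response body", "error", err)

	// Style after this commit: only the message is capitalized;
	// the "error" key and its value are unchanged.
	slog.Warn("Failed to close response body", "error", err)
}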

View File

@@ -79,7 +79,7 @@ func (fs *FS) extractFiles(_ context.Context, pluginArchive *zip.ReadCloser, plu
 defer func() {
 if err := pluginArchive.Close(); err != nil {
-fs.log.Warn("failed to close zip file", "error", err)
+fs.log.Warn("Failed to close zip file", "error", err)
 }
 }()
@@ -121,7 +121,7 @@ func (fs *FS) extractFiles(_ context.Context, pluginArchive *zip.ReadCloser, plu
 if isSymlink(zf) {
 if err := extractSymlink(installDir, zf, dstPath); err != nil {
-fs.log.Warn("failed to extract symlink", "error", err)
+fs.log.Warn("Failed to extract symlink", "error", err)
 continue
 }
 continue

View File

@@ -360,7 +360,7 @@ func (s *Service) DecryptedValues(ctx context.Context, ds *datasources.DataSourc
 if exist {
 err = json.Unmarshal([]byte(secret), &decryptedValues)
 if err != nil {
-s.logger.Debug("failed to unmarshal secret value, using legacy secrets", "err", err)
+s.logger.Debug("Failed to unmarshal secret value, using legacy secrets", "err", err)
 }
 }

View File

@@ -29,10 +29,10 @@ func NewCachingMiddleware(cachingService caching.CachingService) plugins.ClientM
 func NewCachingMiddlewareWithFeatureManager(cachingService caching.CachingService, features *featuremgmt.FeatureManager) plugins.ClientMiddleware {
 log := log.New("caching_middleware")
 if err := prometheus.Register(QueryCachingRequestHistogram); err != nil {
-log.Error("error registering prometheus collector 'QueryRequestHistogram'", "error", err)
+log.Error("Error registering prometheus collector 'QueryRequestHistogram'", "error", err)
 }
 if err := prometheus.Register(ResourceCachingRequestHistogram); err != nil {
-log.Error("error registering prometheus collector 'ResourceRequestHistogram'", "error", err)
+log.Error("Error registering prometheus collector 'ResourceRequestHistogram'", "error", err)
 }
 return plugins.ClientMiddlewareFunc(func(next plugins.Client) plugins.Client {
 return &CachingMiddleware{

View File

@@ -131,7 +131,7 @@ func (kr *KeyRetriever) downloadKeys(ctx context.Context) error {
 defer func() {
 err := resp.Body.Close()
 if err != nil {
-kr.log.Warn("error closing response body", "error", err)
+kr.log.Warn("Error closing response body", "error", err)
 }
 }()
@@ -145,7 +145,7 @@ func (kr *KeyRetriever) downloadKeys(ctx context.Context) error {
 }
 if err := json.Unmarshal(body, &data); err != nil {
-kr.log.Debug("error unmarshalling response body", "error", err, "body", string(body))
+kr.log.Debug("Error unmarshalling response body", "error", err, "body", string(body))
 return fmt.Errorf("error unmarshalling response body: %w", err)
 }

View File

@@ -1537,6 +1537,6 @@ func Test_executeQueryErrorWithDifferentLogAnalyticsCreds(t *testing.T) {
 tracer := tracing.InitializeTracerForTest()
 _, err := ds.executeQuery(ctx, query, dsInfo, &http.Client{}, dsInfo.Services["Azure Log Analytics"].URL, tracer)
 if !strings.Contains(err.Error(), "credentials for Log Analytics are no longer supported") {
-t.Error("expecting the error to inform of bad credentials")
+t.Error("Expecting the error to inform of bad credentials")
 }
 }

View File

@@ -89,7 +89,7 @@ func doRequestPage(ctx context.Context, logger log.Logger, r *http.Request, dsIn
 defer func() {
 if err = res.Body.Close(); err != nil {
-logger.Warn("failed to close response body", "error", err)
+logger.Warn("Failed to close response body", "error", err)
 }
 }()

View File

@@ -228,7 +228,7 @@ func (e *cloudWatchExecutor) executeStartQuery(ctx context.Context, logsClient c
 startQueryInput.Limit = aws.Int64(*logsQuery.Limit)
 }
-logger.Debug("calling startquery with context with input", "input", startQueryInput)
+logger.Debug("Calling startquery with context with input", "input", startQueryInput)
 return logsClient.StartQueryWithContext(ctx, startQueryInput)
 }
@@ -238,7 +238,7 @@ func (e *cloudWatchExecutor) handleStartQuery(ctx context.Context, logger log.Lo
 if err != nil {
 var awsErr awserr.Error
 if errors.As(err, &awsErr) && awsErr.Code() == "LimitExceededException" {
-logger.Debug("executeStartQuery limit exceeded", "err", awsErr)
+logger.Debug("ExecuteStartQuery limit exceeded", "err", awsErr)
 return nil, &AWSError{Code: limitExceededException, Message: err.Error()}
 }
 return nil, err

View File

@@ -84,7 +84,7 @@ func (q *CloudWatchQuery) GetGetMetricDataAPIMode() GMDApiMode {
 return GMDApiModeSQLExpression
 }
-q.logger.Warn("could not resolve CloudWatch metric query type. Falling back to metric stat.", "query", q)
+q.logger.Warn("Could not resolve CloudWatch metric query type. Falling back to metric stat.", "query", q)
 return GMDApiModeMetricStat
 }

View File

@@ -20,7 +20,7 @@ func ResourceRequestMiddleware(handleFunc models.RouteHandlerFunc, logger log.Lo
 pluginContext := httpadapter.PluginConfigFromContext(ctx)
 json, httpError := handleFunc(ctx, pluginContext, reqCtxFactory, req.URL.Query())
 if httpError != nil {
-logger.Error("error handling resource request", "error", httpError.Message)
+logger.Error("Error handling resource request", "error", httpError.Message)
 respondWithError(rw, httpError)
 return
 }
@@ -28,7 +28,7 @@ func ResourceRequestMiddleware(handleFunc models.RouteHandlerFunc, logger log.Lo
 rw.Header().Set("Content-Type", "application/json")
 _, err := rw.Write(json)
 if err != nil {
-logger.Error("error handling resource request", "error", err)
+logger.Error("Error handling resource request", "error", err)
 respondWithError(rw, models.NewHttpError("error writing response in resource request middleware", http.StatusInternalServerError, err))
 }
 }

View File

@@ -33,7 +33,7 @@ func (c *PyroscopeClient) ProfileTypes(ctx context.Context) ([]*ProfileType, err
 }
 defer func() {
 if err := resp.Body.Close(); err != nil {
-logger.Error("failed to close response body", "err", err)
+logger.Error("Failed to close response body", "err", err)
 }
 }()
@@ -97,7 +97,7 @@ func (c *PyroscopeClient) getProfileData(ctx context.Context, profileTypeID, lab
 }
 url := c.URL + "/render?" + params.Encode()
-logger.Debug("calling /render", "url", url)
+logger.Debug("Calling /render", "url", url)
 resp, err := c.httpClient.Get(url)
 if err != nil {
@@ -105,7 +105,7 @@ func (c *PyroscopeClient) getProfileData(ctx context.Context, profileTypeID, lab
 }
 defer func() {
 if err := resp.Body.Close(); err != nil {
-logger.Error("failed to close response body", "err", err)
+logger.Error("Failed to close response body", "err", err)
 }
 }()
@@ -118,7 +118,7 @@ func (c *PyroscopeClient) getProfileData(ctx context.Context, profileTypeID, lab
 err = json.Unmarshal(body, &respData)
 if err != nil {
-logger.Debug("flamegraph data", "body", string(body))
+logger.Debug("Flamegraph data", "body", string(body))
 return nil, fmt.Errorf("error decoding flamegraph data: %v", err)
 }
@@ -230,7 +230,7 @@ func (c *PyroscopeClient) LabelNames(ctx context.Context, query string, start in
 }
 defer func() {
 if err := resp.Body.Close(); err != nil {
-logger.Error("failed to close response body", "err", err)
+logger.Error("Failed to close response body", "err", err)
 }
 }()
@@ -264,7 +264,7 @@ func (c *PyroscopeClient) LabelValues(ctx context.Context, query string, label s
 }
 defer func() {
 if err := resp.Body.Close(); err != nil {
-logger.Error("failed to close response body", "err", err)
+logger.Error("Failed to close response body", "err", err)
 }
 }()
 var values []string
@@ -274,7 +274,7 @@ func (c *PyroscopeClient) LabelValues(ctx context.Context, query string, label s
 }
 err = json.Unmarshal(body, &values)
 if err != nil {
-logger.Debug("response", "body", string(body))
+logger.Debug("Response", "body", string(body))
 return nil, fmt.Errorf("error unmarshaling response %v", err)
 }

View File

@@ -176,7 +176,7 @@ func levelsToTree(levels []*Level, names []string) *ProfileTree {
 // If we still have levels to go, this should not happen. Something is probably wrong with the flamebearer data.
 if len(parentsStack) == 0 {
-logger.Error("parentsStack is empty but we are not at the the last level", "currentLevel", currentLevel)
+logger.Error("ParentsStack is empty but we are not at the the last level", "currentLevel", currentLevel)
 break
 }
@@ -220,7 +220,7 @@ func levelsToTree(levels []*Level, names []string) *ProfileTree {
 // We went out of parents bounds so lets move to next parent. We will evaluate the same item again, but
 // we will check if it is a child of the next parent item in line.
 if len(parentsStack) == 0 {
-logger.Error("parentsStack is empty but there are still items in current level", "currentLevel", currentLevel, "itemIndex", itemIndex)
+logger.Error("ParentsStack is empty but there are still items in current level", "currentLevel", currentLevel, "itemIndex", itemIndex)
 break
 }
 currentParent = parentsStack[:1][0]

View File

@@ -164,7 +164,7 @@ func (s *Service) QueryData(ctx context.Context, req *backend.QueryDataRequest)
 defer func() {
 err := res.Body.Close()
 if err != nil {
-logger.Warn("failed to close response body", "error", err)
+logger.Warn("Failed to close response body", "error", err)
 }
 }()
@@ -206,7 +206,7 @@ func (s *Service) processQueries(logger log.Logger, queries []backend.DataQuery)
 if err != nil {
 return nil, nil, nil, err
 }
-logger.Debug("graphite", "query", model)
+logger.Debug("Graphite", "query", model)
 currTarget := ""
 if fullTarget, err := model.Get(TargetFullModelField).String(); err == nil {
 currTarget = fullTarget
@@ -214,7 +214,7 @@ func (s *Service) processQueries(logger log.Logger, queries []backend.DataQuery)
 currTarget = model.Get(TargetModelField).MustString()
 }
 if currTarget == "" {
-logger.Debug("graphite", "empty query target", model)
+logger.Debug("Graphite", "empty query target", model)
 emptyQueries = append(emptyQueries, fmt.Sprintf("Query: %v has no target", model))
 continue
 }

View File

@@ -154,7 +154,7 @@ func getHealthCheckMessage(logger log.Logger, message string, err error) (*backe
 }, nil
 }
-logger.Warn("error performing influxdb healthcheck", "err", err.Error())
+logger.Warn("Error performing influxdb healthcheck", "err", err.Error())
 errorMessage := fmt.Sprintf("%s %s", err.Error(), message)
 return &backend.CheckHealthResult{

View File

@@ -101,7 +101,7 @@ func (s *Service) CallResource(ctx context.Context, req *backend.CallResourceReq
 dsInfo, err := s.getDSInfo(ctx, req.PluginContext)
 logger := logger.FromContext(ctx).New("api", "CallResource")
 if err != nil {
-logger.Error("failed to get data source info", "err", err)
+logger.Error("Failed to get data source info", "err", err)
 return err
 }
 return callResource(ctx, req, sender, dsInfo, logger, s.tracer)

View File

@@ -93,10 +93,10 @@ func (s *Service) RunStream(ctx context.Context, req *backend.RunStreamRequest,
 }
 wsurl.RawQuery = params.Encode()
-logger.Info("connecting to websocket", "url", wsurl)
+logger.Info("Connecting to websocket", "url", wsurl)
 c, r, err := websocket.DefaultDialer.Dial(wsurl.String(), nil)
 if err != nil {
-logger.Error("error connecting to websocket", "err", err)
+logger.Error("Error connecting to websocket", "err", err)
 return fmt.Errorf("error connecting to websocket")
 }
@@ -108,7 +108,7 @@ func (s *Service) RunStream(ctx context.Context, req *backend.RunStreamRequest,
 _ = r.Body.Close()
 }
 err = c.Close()
-logger.Error("closing loki websocket", "err", err)
+logger.Error("Closing loki websocket", "err", err)
 }()
 prev := data.FrameJSONCache{}
@@ -120,7 +120,7 @@ func (s *Service) RunStream(ctx context.Context, req *backend.RunStreamRequest,
 for {
 _, message, err := c.ReadMessage()
 if err != nil {
-logger.Error("websocket read:", "err", err)
+logger.Error("Websocket read:", "err", err)
 return
 }
@@ -143,7 +143,7 @@ func (s *Service) RunStream(ctx context.Context, req *backend.RunStreamRequest,
 }
 if err != nil {
-logger.Error("websocket write:", "err", err, "raw", message)
+logger.Error("Websocket write:", "err", err, "raw", message)
 return
 }
 }
@@ -155,14 +155,14 @@ func (s *Service) RunStream(ctx context.Context, req *backend.RunStreamRequest,
 for {
 select {
 case <-done:
-logger.Info("socket done")
+logger.Info("Socket done")
 return nil
 case <-ctx.Done():
-logger.Info("stop streaming (context canceled)")
+logger.Info("Stop streaming (context canceled)")
 return nil
 case t := <-ticker.C:
 count++
-logger.Error("loki websocket ping?", "time", t, "count", count)
+logger.Error("Loki websocket ping?", "time", t, "count", count)
 }
 }
 }

View File

@@ -34,7 +34,7 @@ func newMysqlMacroEngine(logger log.Logger, cfg *setting.Cfg) sqleng.SQLMacroEng
 func (m *mySQLMacroEngine) Interpolate(query *backend.DataQuery, timeRange backend.TimeRange, sql string) (string, error) {
 matches := restrictedRegExp.FindAllStringSubmatch(sql, 1)
 if len(matches) > 0 {
-m.logger.Error("show grants, session_user(), current_user(), system_user() or user() not allowed in query")
+m.logger.Error("Show grants, session_user(), current_user(), system_user() or user() not allowed in query")
 return "", fmt.Errorf("invalid query - %s", m.userError)
 }

View File

@@ -37,17 +37,17 @@ func (s *Service) CheckHealth(ctx context.Context, req *backend.CheckHealthReque
 hc, err := healthcheck(ctx, req, ds)
 if err != nil {
-logger.Warn("error performing prometheus healthcheck", "err", err.Error())
+logger.Warn("Error performing prometheus healthcheck", "err", err.Error())
 return nil, err
 }
 heuristics, err := getHeuristics(ctx, ds)
 if err != nil {
-logger.Warn("failed to get prometheus heuristics", "err", err.Error())
+logger.Warn("Failed to get prometheus heuristics", "err", err.Error())
 } else {
 jsonDetails, err := json.Marshal(heuristics)
 if err != nil {
-logger.Warn("failed to marshal heuristics", "err", err)
+logger.Warn("Failed to marshal heuristics", "err", err)
 } else {
 hc.JSONDetails = jsonDetails
 }

View File

@@ -97,7 +97,7 @@ func getHeuristics(ctx context.Context, i *instance) (*Heuristics, error) {
 }
 buildInfo, err := getBuildInfo(ctx, i)
 if err != nil {
-logger.Warn("failed to get prometheus buildinfo", "err", err.Error())
+logger.Warn("Failed to get prometheus buildinfo", "err", err.Error())
 return nil, fmt.Errorf("failed to get buildinfo: %w", err)
 }
 if len(buildInfo.Data.Features) == 0 {

View File

@@ -38,7 +38,7 @@ type instance struct {
 }
 func ProvideService(httpClientProvider httpclient.Provider, cfg *setting.Cfg, features featuremgmt.FeatureToggles, tracer tracing.Tracer) *Service {
-plog.Debug("initializing")
+plog.Debug("Initializing")
 return &Service{
 im: datasource.NewInstanceManager(newInstanceSettings(httpClientProvider, cfg, features, tracer)),
 features: features,

View File

@@ -162,7 +162,7 @@ func (s *QueryData) rangeQuery(ctx context.Context, c *client.Client, q *models.
 defer func() {
 err := res.Body.Close()
 if err != nil {
-s.log.Warn("failed to close query range response body", "error", err)
+s.log.Warn("Failed to close query range response body", "error", err)
 }
 }()
@@ -187,7 +187,7 @@ func (s *QueryData) instantQuery(ctx context.Context, c *client.Client, q *model
 defer func() {
 err := res.Body.Close()
 if err != nil {
-s.log.Warn("failed to close response body", "error", err)
+s.log.Warn("Failed to close response body", "error", err)
 }
 }()
@@ -205,7 +205,7 @@ func (s *QueryData) exemplarQuery(ctx context.Context, c *client.Client, q *mode
 defer func() {
 err := res.Body.Close()
 if err != nil {
-s.log.Warn("failed to close response body", "error", err)
+s.log.Warn("Failed to close response body", "error", err)
 }
 }()
 return s.parseResponse(ctx, q, res)

View File

@@ -40,7 +40,7 @@ func (s *Service) getTrace(ctx context.Context, pCtx backend.PluginContext, quer
 defer func() {
 if err := resp.Body.Close(); err != nil {
-s.logger.FromContext(ctx).Warn("failed to close response body", "err", err)
+s.logger.FromContext(ctx).Warn("Failed to close response body", "err", err)
 }
 }()

View File

@@ -35,7 +35,7 @@ func ProvideService() *Service {
 var err error
 s.sims, err = sims.NewSimulationEngine()
 if err != nil {
-s.logger.Error("unable to initialize SimulationEngine", "err", err)
+s.logger.Error("Unable to initialize SimulationEngine", "err", err)
 }
 s.registerScenarios()