refactor: include binary name in service field in log entries (#2077)
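
Logging setup now happens directly in each binary's `main` instead of going through per-crate `logger`/`env` wrapper modules, and `router_env::service_name!()` resolves to the binary name, so the `service` field in log entries identifies the binary that emitted them. A minimal sketch of the new call shape (illustrative only: the helper name is hypothetical and configuration loading is elided):

    // Compiled as part of a binary target; service_name!() expands to
    // env!("CARGO_BIN_NAME"), so the `service` field carries the binary name.
    fn init_logging(log_conf: &router_env::config::Log) -> router_env::TelemetryGuard {
        router_env::setup(
            log_conf,
            router_env::service_name!(),   // service name: the binary's name
            [router_env::service_name!()], // crates_to_filter argument
        )
    }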

Sanchith Hegde
2023-09-04 17:14:47 +05:30
committed by GitHub
parent e1cebd4179
commit 20d44acd20
10 changed files with 43 additions and 54 deletions

View File

@@ -1,16 +0,0 @@
-#[doc(inline)]
-pub use router_env::*;
-
-pub mod logger {
-    #[doc(inline)]
-    pub use router_env::{log, logger::*};
-
-    /// Setup logging sub-system
-    pub fn setup(conf: &config::Log) -> TelemetryGuard {
-        router_env::setup(
-            conf,
-            router_env::service_name!(),
-            [router_env::service_name!()],
-        )
-    }
-}

View File

@@ -1,6 +1,6 @@
 mod connection;
-pub mod env;
 pub mod errors;
+pub mod logger;
 pub(crate) mod metrics;
 pub mod services;
 pub mod settings;
@@ -9,7 +9,6 @@ use std::sync::{atomic, Arc};
 use common_utils::signals::get_allowed_signals;
 use diesel_models::kv;
-pub use env as logger;
 use error_stack::{IntoReport, ResultExt};
 use tokio::sync::mpsc;

View File

@@ -0,0 +1,2 @@
+#[doc(inline)]
+pub use router_env::*;

View File

@@ -20,7 +20,11 @@ async fn main() -> DrainerResult<()> {
     let shutdown_intervals = conf.drainer.shutdown_interval;
     let loop_interval = conf.drainer.loop_interval;

-    let _guard = logger::setup(&conf.log);
+    let _guard = router_env::setup(
+        &conf.log,
+        router_env::service_name!(),
+        [router_env::service_name!()],
+    );

     logger::info!("Drainer started [{:?}] [{:?}]", conf.drainer, conf.log);

View File

@@ -34,7 +34,11 @@ async fn main() -> ApplicationResult<()> {
     conf.validate()
         .expect("Failed to validate router configuration");

-    let _guard = logger::setup(&conf.log, [router_env::service_name!(), "actix_server"]);
+    let _guard = router_env::setup(
+        &conf.log,
+        router_env::service_name!(),
+        [router_env::service_name!(), "actix_server"],
+    );

     logger::info!("Application started [{:?}] [{:?}]", conf.server, conf.log);

View File

@@ -1,5 +1,5 @@
 #![recursion_limit = "256"]

-use std::sync::Arc;
+use std::{str::FromStr, sync::Arc};
 use error_stack::ResultExt;
 use router::{
@@ -37,11 +37,23 @@ async fn main() -> CustomResult<(), errors::ProcessTrackerError> {
         redis_shutdown_signal_rx,
         tx.clone(),
     ));
-    let _guard = logger::setup(&state.conf.log, [router_env::service_name!()]);
+
+    #[allow(clippy::expect_used)]
+    let scheduler_flow_str =
+        std::env::var(SCHEDULER_FLOW).expect("SCHEDULER_FLOW environment variable not set");
+    #[allow(clippy::expect_used)]
+    let scheduler_flow = scheduler::SchedulerFlow::from_str(&scheduler_flow_str)
+        .expect("Unable to parse SchedulerFlow from environment variable");
+
+    let _guard = router_env::setup(
+        &state.conf.log,
+        &scheduler_flow_str,
+        [router_env::service_name!()],
+    );

     logger::debug!(startup_config=?state.conf);

-    start_scheduler(&state, (tx, rx)).await?;
+    start_scheduler(&state, scheduler_flow, (tx, rx)).await?;

     eprintln!("Scheduler shut down");
     Ok(())
@@ -49,20 +61,14 @@ async fn main() -> CustomResult<(), errors::ProcessTrackerError> {

 async fn start_scheduler(
     state: &routes::AppState,
+    scheduler_flow: scheduler::SchedulerFlow,
     channel: (mpsc::Sender<()>, mpsc::Receiver<()>),
 ) -> CustomResult<(), errors::ProcessTrackerError> {
-    use std::str::FromStr;
-
-    #[allow(clippy::expect_used)]
-    let flow = std::env::var(SCHEDULER_FLOW).expect("SCHEDULER_FLOW environment variable not set");
-    #[allow(clippy::expect_used)]
-    let flow = scheduler::SchedulerFlow::from_str(&flow)
-        .expect("Unable to parse SchedulerFlow from environment variable");
-
     let scheduler_settings = state
         .conf
         .scheduler
         .clone()
         .ok_or(errors::ProcessTrackerError::ConfigurationError)?;
-    scheduler::start_process_tracker(state, flow, Arc::new(scheduler_settings), channel).await
+    scheduler::start_process_tracker(state, scheduler_flow, Arc::new(scheduler_settings), channel)
+        .await
 }
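
Note that the scheduler is the one binary that does not pass `router_env::service_name!()` as the service name: it reads `SCHEDULER_FLOW` first and passes the flow string, so its `service` field reflects the flow it is running as. This runtime string is what motivates relaxing the `service_name` parameter of `setup` from `&'static str` to `&str` in the `router_env` changes below.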

View File

@@ -1,14 +1,2 @@
 #[doc(inline)]
 pub use router_env::*;
-
-pub mod logger {
-    #[doc(inline)]
-    pub use router_env::{log, logger::*};
-    /// Setup logging sub-system.
-    pub fn setup(
-        conf: &config::Log,
-        crates_to_filter: impl AsRef<[&'static str]>,
-    ) -> TelemetryGuard {
-        router_env::setup(conf, router_env::service_name!(), crates_to_filter)
-    }
-}

View File

@@ -147,13 +147,14 @@ macro_rules! commit {
 //     };
 // }

-/// Service name deduced from name of the crate.
+/// Service name deduced from name of the binary.
+/// This macro must be called within binaries only.
 ///
 /// Example: `router`.
 #[macro_export]
 macro_rules! service_name {
     () => {
-        env!("CARGO_CRATE_NAME")
+        env!("CARGO_BIN_NAME")
     };
 }
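
For context (not part of the diff): `CARGO_CRATE_NAME` names whichever crate the macro happens to expand in, which previously was a shared library helper (the removed `logger`/`env` modules above), so, for example, the scheduler binary reported `router` as its service. `CARGO_BIN_NAME` is only defined while a binary target is being compiled, so the macro now enforces the binaries-only rule at compile time and yields the binary's own name. A tiny illustrative sketch:

    // Sketch only: a binary tagging its own logs without hard-coding its name.
    fn main() {
        // Expands to env!("CARGO_BIN_NAME"), e.g. "router" or "drainer".
        let service: &str = router_env::service_name!();
        println!("service = {service}");
    }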

View File

@@ -32,7 +32,7 @@ pub struct TelemetryGuard {
 /// current cargo workspace are automatically considered for verbose logging.
 pub fn setup(
     config: &config::Log,
-    service_name: &'static str,
+    service_name: &str,
     crates_to_filter: impl AsRef<[&'static str]>,
 ) -> TelemetryGuard {
     let mut guards = Vec::new();
@@ -133,7 +133,7 @@ fn get_opentelemetry_exporter(config: &config::LogTelemetry) -> TonicExporterBui

 fn setup_tracing_pipeline(
     config: &config::LogTelemetry,
-    service_name: &'static str,
+    service_name: &str,
 ) -> Option<tracing_opentelemetry::OpenTelemetryLayer<tracing_subscriber::Registry, trace::Tracer>>
 {
     global::set_text_map_propagator(TraceContextPropagator::new());
@@ -144,7 +144,7 @@ fn setup_tracing_pipeline(
         ))
         .with_resource(Resource::new(vec![KeyValue::new(
             "service.name",
-            service_name,
+            service_name.to_owned(),
         )]));
     if config.use_xray_generator {
         trace_config = trace_config.with_id_generator(trace::XrayIdGenerator::default());
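
Since the service name can now be a runtime value (the scheduler's flow string), `setup` and `setup_tracing_pipeline` accept `&str` instead of `&'static str`, and the OpenTelemetry resource stores an owned copy via `service_name.to_owned()`, presumably because a borrowed string without a `'static` lifetime cannot be placed in the resource directly.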

View File

@@ -1,18 +1,19 @@
 #![allow(clippy::unwrap_used)]

-use router_env as env;
-
 mod test_module;

-use env::TelemetryGuard;
-use test_module::some_module::*;
+use router_env::TelemetryGuard;
+
+use self::test_module::some_module::*;

 fn logger() -> &'static TelemetryGuard {
     use once_cell::sync::OnceCell;
     static INSTANCE: OnceCell<TelemetryGuard> = OnceCell::new();

     INSTANCE.get_or_init(|| {
-        let config = env::Config::new().unwrap();
-        env::logger::setup(&config.log, env::service_name!(), [])
+        let config = router_env::Config::new().unwrap();
+
+        router_env::setup(&config.log, "router_env_test", [])
     })
 }
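
The integration test above now passes a hard-coded `"router_env_test"` service name and calls `router_env::setup` directly, in line with the new requirement that `service_name!()` be invoked from binary targets only.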