fix: add disable env flag for consumer (#475)
@@ -102,4 +102,7 @@ base_url = "https://eu.sandbox.api-ingenico.com/"
 
 [scheduler]
 stream = "SCHEDULER_STREAM"
+
+[scheduler.consumer]
+disabled = false
 consumer_group = "SCHEDULER_GROUP"
@@ -166,7 +166,10 @@ cards = [
 # It defines the the streams/queues name and configuration as well as event selection variables
 [scheduler]
 stream = "SCHEDULER_STREAM"
+
+[scheduler.consumer]
 consumer_group = "SCHEDULER_GROUP"
+disabled = false # This flag decides if the consumer should actively consume task
 
 [scheduler.producer]
 upper_fetch_limit = 0 # Upper limit for fetching entries from the redis queue (in seconds)
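The commit title mentions an env flag: the intent is that `scheduler.consumer.disabled` can be flipped per deployment rather than only in the checked-in TOML above. A minimal sketch of that pattern with the `config` crate follows; the struct layout mirrors the settings types added later in this diff, but the file path, environment prefix, and separator are illustrative assumptions, not necessarily hyperswitch's actual values.

```rust
// Sketch only: load a TOML file shaped like the config above, then let
// environment variables override it (e.g. `APP__SCHEDULER__CONSUMER__DISABLED=true`).
// The "config/development" path and the "APP"/"__" prefix/separator are assumptions.
use config::{Config, ConfigError, Environment, File};
use serde::Deserialize;

#[derive(Debug, Deserialize, Default)]
#[serde(default)]
struct ConsumerSettings {
    disabled: bool,
    consumer_group: String,
}

#[derive(Debug, Deserialize, Default)]
#[serde(default)]
struct SchedulerSettings {
    stream: String,
    consumer: ConsumerSettings,
}

#[derive(Debug, Deserialize, Default)]
#[serde(default)]
struct Settings {
    scheduler: SchedulerSettings,
}

fn main() -> Result<(), ConfigError> {
    let settings: Settings = Config::builder()
        .add_source(File::with_name("config/development"))
        .add_source(Environment::with_prefix("APP").separator("__"))
        .build()?
        .try_deserialize()?;

    println!("consumer disabled: {}", settings.scheduler.consumer.disabled);
    Ok(())
}
```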
@@ -82,8 +82,8 @@ impl Default for super::settings::SchedulerSettings {
     fn default() -> Self {
         Self {
             stream: "SCHEDULER_STREAM".into(),
-            consumer_group: "SCHEDULER_GROUP".into(),
             producer: super::settings::ProducerSettings::default(),
+            consumer: super::settings::ConsumerSettings::default(),
         }
     }
 }
@@ -100,6 +100,15 @@ impl Default for super::settings::ProducerSettings {
     }
 }
 
+impl Default for super::settings::ConsumerSettings {
+    fn default() -> Self {
+        Self {
+            disabled: false,
+            consumer_group: "SCHEDULER_GROUP".into(),
+        }
+    }
+}
+
 #[cfg(feature = "kv_store")]
 impl Default for super::settings::DrainerSettings {
     fn default() -> Self {
@@ -164,8 +164,8 @@ pub struct ConnectorParams {
 #[serde(default)]
 pub struct SchedulerSettings {
     pub stream: String,
-    pub consumer_group: String,
     pub producer: ProducerSettings,
+    pub consumer: ConsumerSettings,
 }
 
 #[derive(Debug, Clone, Deserialize)]
@@ -179,6 +179,13 @@ pub struct ProducerSettings {
     pub batch_size: usize,
 }
 
+#[derive(Debug, Clone, Deserialize)]
+#[serde(default)]
+pub struct ConsumerSettings {
+    pub disabled: bool,
+    pub consumer_group: String,
+}
+
 #[cfg(feature = "kv_store")]
 #[derive(Debug, Clone, Deserialize)]
 #[serde(default)]
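Because the new `ConsumerSettings` struct carries `#[serde(default)]`, a config that omits the `[scheduler.consumer]` table or any of its keys still deserializes, falling back field by field to the `Default` impl added earlier in this diff (`disabled = false`, `consumer_group = "SCHEDULER_GROUP"`). A small self-contained sketch of that behavior, using the `toml` crate and a standalone copy of the struct rather than the crate-internal type:

```rust
use serde::Deserialize;

// Standalone copy of the new settings type for illustration; the real one
// lives in the router's settings module with its own Default impl.
#[derive(Debug, Deserialize)]
#[serde(default)]
struct ConsumerSettings {
    disabled: bool,
    consumer_group: String,
}

impl Default for ConsumerSettings {
    fn default() -> Self {
        Self {
            disabled: false,
            consumer_group: "SCHEDULER_GROUP".into(),
        }
    }
}

fn main() {
    // Only `disabled` is given; `consumer_group` falls back to the default.
    let partial: ConsumerSettings = toml::from_str("disabled = true").unwrap();
    assert!(partial.disabled);
    assert_eq!(partial.consumer_group, "SCHEDULER_GROUP");

    // An empty table yields the full default.
    let empty: ConsumerSettings = toml::from_str("").unwrap();
    assert!(!empty.disabled);
}
```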
@@ -152,7 +152,7 @@ impl super::settings::SchedulerSettings {
             ))
         })?;
 
-        when(self.consumer_group.is_default_or_empty(), || {
+        when(self.consumer.consumer_group.is_default_or_empty(), || {
             Err(ApplicationError::InvalidConfigurationValueError(
                 "scheduler consumer group must not be empty".into(),
             ))
@@ -71,6 +71,12 @@ pub async fn start_consumer(
         match rx.try_recv() {
             Err(oneshot::error::TryRecvError::Empty) => {
                 interval.tick().await;
+
+                // A guard from env to disable the consumer
+                if settings.consumer.disabled {
+                    continue;
+                }
+
                 tokio::task::spawn(pt_utils::consumer_operation_handler(
                     state.clone(),
                     options.clone(),
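The guard above skips spawning consumer work while still ticking the interval and polling the shutdown channel, so a disabled consumer stays alive but idle. A stripped-down, runnable sketch of that loop shape (tokio only, with a placeholder task standing in for `pt_utils::consumer_operation_handler` and a plain bool in place of the settings struct):

```rust
use std::time::Duration;
use tokio::{sync::oneshot, time};

// Placeholder for the real consumer work.
async fn consume_once() {
    println!("consuming one batch of scheduled tasks");
}

async fn start_consumer(disabled: bool, mut rx: oneshot::Receiver<()>) {
    let mut interval = time::interval(Duration::from_millis(500));
    loop {
        match rx.try_recv() {
            Err(oneshot::error::TryRecvError::Empty) => {
                interval.tick().await;

                // Same idea as the diff: when the flag is set, keep looping
                // (so shutdown is still observed) but never spawn work.
                if disabled {
                    continue;
                }

                tokio::task::spawn(consume_once());
            }
            // A value or a closed channel: treat both as a shutdown signal.
            _ => break,
        }
    }
}

#[tokio::main]
async fn main() {
    let (tx, rx) = oneshot::channel();
    let handle = tokio::spawn(start_consumer(false, rx));
    time::sleep(Duration::from_secs(2)).await;
    let _ = tx.send(());
    let _ = handle.await;
}
```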
@@ -112,7 +118,7 @@ pub async fn consumer_operations(
     settings: &settings::SchedulerSettings,
 ) -> CustomResult<(), errors::ProcessTrackerError> {
     let stream_name = settings.stream.clone();
-    let group_name = settings.consumer_group.clone();
+    let group_name = settings.consumer.consumer_group.clone();
     let consumer_name = format!("consumer_{}", Uuid::new_v4());
 
     let group_created = &mut state
@@ -156,7 +156,7 @@ pub fn divide_into_batches(
         .fold(Vec::new(), |mut batches, item| {
             let batch = ProcessTrackerBatch {
                 id: batch_id.clone(),
-                group_name: conf.consumer_group.clone(),
+                group_name: conf.consumer.consumer_group.clone(),
                 stream_name: conf.stream.clone(),
                 connection_name: String::new(),
                 created_time: batch_creation_time,