Skip to content

Commit 5d57a71

Browse files
add parseable-enterprise 2.6.7 chart (#1625)
* add parseable-enterprise 2.6.7 chart
* fix: clippy

Co-authored-by: Anant Vindal <anant.v09@protonmail.com>
1 parent f11e09c commit 5d57a71

8 files changed

Lines changed: 162 additions & 172 deletions

File tree

56.3 KB
Binary file not shown.

index.yaml

Lines changed: 102 additions & 77 deletions
Large diffs are not rendered by default.

src/alerts/alert_structs.rs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -833,7 +833,7 @@ impl AlertStateEntry {
833833

834834
// Create a sorted view without mutating the original
835835
let mut sorted_states = self.states.clone();
836-
sorted_states.sort_by(|a, b| a.last_updated_at.cmp(&b.last_updated_at));
836+
sorted_states.sort_by_key(|a| a.last_updated_at);
837837

838838
for transition in &sorted_states {
839839
match transition.state {

src/alerts/mod.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1459,8 +1459,8 @@ impl AlertManagerTrait for Alerts {
14591459
// let alerts = self.alerts.read().await;
14601460
let mut tags = if let Some(alerts) = self.alerts.read().await.get(tenant) {
14611461
alerts
1462-
.iter()
1463-
.filter_map(|(_, alert)| alert.get_tags().as_ref())
1462+
.values()
1463+
.filter_map(|alert| alert.get_tags().as_ref())
14641464
.flat_map(|t| t.iter().cloned())
14651465
.collect::<Vec<String>>()
14661466
} else {

src/handlers/http/logstream.rs

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -280,15 +280,15 @@ pub async fn get_stats_date(
280280
let events_ingested = EVENTS_INGESTED_DATE
281281
.get_metric_with_label_values(&event_labels)
282282
.unwrap()
283-
.get() as u64;
283+
.get();
284284
let ingestion_size = EVENTS_INGESTED_SIZE_DATE
285285
.get_metric_with_label_values(&event_labels)
286286
.unwrap()
287-
.get() as u64;
287+
.get();
288288
let storage_size = EVENTS_STORAGE_SIZE_DATE
289289
.get_metric_with_label_values(&storage_size_labels)
290290
.unwrap()
291-
.get() as u64;
291+
.get();
292292

293293
let stats = Stats {
294294
events: events_ingested,

src/metrics/prom_utils.rs

Lines changed: 20 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -129,29 +129,26 @@ impl Metrics {
129129
for sample in samples {
130130
if let PromValue::Gauge(val) = sample.value {
131131
match sample.metric.as_str() {
132-
"parseable_events_ingested_date" => {
132+
"parseable_events_ingested_date"
133133
if sample.labels.get("stream").expect("stream name is present")
134134
== stream_name
135-
&& sample.labels.get("date").expect("date is present") == date
136-
{
137-
events_ingested = val as u64;
138-
}
135+
&& sample.labels.get("date").expect("date is present") == date =>
136+
{
137+
events_ingested = val as u64;
139138
}
140-
"parseable_events_ingested_size_date" => {
139+
"parseable_events_ingested_size_date"
141140
if sample.labels.get("stream").expect("stream name is present")
142141
== stream_name
143-
&& sample.labels.get("date").expect("date is present") == date
144-
{
145-
ingestion_size = val as u64;
146-
}
142+
&& sample.labels.get("date").expect("date is present") == date =>
143+
{
144+
ingestion_size = val as u64;
147145
}
148-
"parseable_events_storage_size_date" => {
146+
"parseable_events_storage_size_date"
149147
if sample.labels.get("stream").expect("stream name is present")
150148
== stream_name
151-
&& sample.labels.get("date").expect("date is present") == date
152-
{
153-
storage_size = val as u64;
154-
}
149+
&& sample.labels.get("date").expect("date is present") == date =>
150+
{
151+
storage_size = val as u64;
155152
}
156153
_ => {}
157154
}
@@ -198,15 +195,15 @@ impl Metrics {
198195
prom_dress.parseable_storage_size.data += val;
199196
}
200197
}
201-
"parseable_lifetime_events_storage_size" => {
202-
if sample.labels.get("type").expect("type is present") == "data" {
203-
prom_dress.parseable_lifetime_storage_size.data += val;
204-
}
198+
"parseable_lifetime_events_storage_size"
199+
if sample.labels.get("type").expect("type is present") == "data" =>
200+
{
201+
prom_dress.parseable_lifetime_storage_size.data += val;
205202
}
206-
"parseable_deleted_events_storage_size" => {
207-
if sample.labels.get("type").expect("type is present") == "data" {
208-
prom_dress.parseable_deleted_storage_size.data += val;
209-
}
203+
"parseable_deleted_events_storage_size"
204+
if sample.labels.get("type").expect("type is present") == "data" =>
205+
{
206+
prom_dress.parseable_deleted_storage_size.data += val;
210207
}
211208
_ => {}
212209
}

src/query/mod.rs

Lines changed: 30 additions & 62 deletions
Original file line numberDiff line numberDiff line change
@@ -73,6 +73,26 @@ use crate::parseable::{DEFAULT_TENANT, PARSEABLE};
7373
use crate::storage::{ObjectStorageProvider, ObjectStoreFormat};
7474
use crate::utils::time::TimeRange;
7575

76+
/// Boxed record-batch stream used as the streaming half of query results.
77+
type BoxedBatchStream = Pin<
78+
Box<
79+
RecordBatchStreamAdapter<
80+
select_all::SelectAll<
81+
Pin<
82+
Box<
83+
dyn RecordBatchStream<
84+
Item = Result<RecordBatch, datafusion::error::DataFusionError>,
85+
> + Send,
86+
>,
87+
>,
88+
>,
89+
>,
90+
>,
91+
>;
92+
93+
/// Result type returned by query execution: either collected batches or a streaming adapter, plus field names.
94+
type QueryResult = Result<(Either<Vec<RecordBatch>, BoxedBatchStream>, Vec<String>), ExecuteError>;
95+
7696
// pub static QUERY_SESSION: Lazy<SessionContext> =
7797
// Lazy::new(|| Query::create_session_context(PARSEABLE.storage()));
7898

@@ -133,37 +153,7 @@ impl InMemorySessionContext {
133153

134154
/// This function executes a query on the dedicated runtime, ensuring that the query is not isolated to a single thread/CPU
135155
/// at a time and has access to the entire thread pool, enabling better concurrent processing, and thus quicker results.
136-
pub async fn execute(
137-
query: Query,
138-
is_streaming: bool,
139-
tenant_id: &Option<String>,
140-
) -> Result<
141-
(
142-
Either<
143-
Vec<RecordBatch>,
144-
Pin<
145-
Box<
146-
RecordBatchStreamAdapter<
147-
select_all::SelectAll<
148-
Pin<
149-
Box<
150-
dyn RecordBatchStream<
151-
Item = Result<
152-
RecordBatch,
153-
datafusion::error::DataFusionError,
154-
>,
155-
> + Send,
156-
>,
157-
>,
158-
>,
159-
>,
160-
>,
161-
>,
162-
>,
163-
Vec<String>,
164-
),
165-
ExecuteError,
166-
> {
156+
pub async fn execute(query: Query, is_streaming: bool, tenant_id: &Option<String>) -> QueryResult {
167157
let id = tenant_id.clone();
168158
QUERY_RUNTIME
169159
.spawn(async move { query.execute(is_streaming, &id).await })
@@ -272,37 +262,15 @@ impl Query {
272262
/// this function returns the result of the query
273263
/// if streaming is true, it returns a stream
274264
/// if streaming is false, it returns a vector of record batches
275-
pub async fn execute(
276-
&self,
277-
is_streaming: bool,
278-
tenant_id: &Option<String>,
279-
) -> Result<
280-
(
281-
Either<
282-
Vec<RecordBatch>,
283-
Pin<
284-
Box<
285-
RecordBatchStreamAdapter<
286-
select_all::SelectAll<
287-
Pin<
288-
Box<
289-
dyn RecordBatchStream<
290-
Item = Result<
291-
RecordBatch,
292-
datafusion::error::DataFusionError,
293-
>,
294-
> + Send,
295-
>,
296-
>,
297-
>,
298-
>,
299-
>,
300-
>,
301-
>,
302-
Vec<String>,
303-
),
304-
ExecuteError,
305-
> {
265+
#[tracing::instrument(
266+
name = "datafusion.execute",
267+
skip(self, is_streaming, tenant_id),
268+
fields(
269+
db.system.name = "datafusion",
270+
db.operation.name = "SELECT",
271+
)
272+
)]
273+
pub async fn execute(&self, is_streaming: bool, tenant_id: &Option<String>) -> QueryResult {
306274
let df = QUERY_SESSION
307275
.get_ctx()
308276
.execute_logical_plan(self.final_logical_plan(tenant_id))

src/query/stream_schema_provider.rs

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -759,10 +759,10 @@ fn is_overlapping_query(
759759
for filter in time_filters {
760760
match filter {
761761
PartialTimeFilter::Low(Bound::Excluded(time))
762-
| PartialTimeFilter::Low(Bound::Included(time)) => {
763-
if time < &first_entry_lower_bound.naive_utc() {
764-
return true;
765-
}
762+
| PartialTimeFilter::Low(Bound::Included(time))
763+
if time < &first_entry_lower_bound.naive_utc() =>
764+
{
765+
return true;
766766
}
767767
_ => {}
768768
}

0 commit comments

Comments
 (0)