Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 2 additions & 13 deletions crates/core/src/db/datastore/locking_tx_datastore/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,8 +11,6 @@ use self::{
sequence::Sequence,
table::Table,
};
use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;
use std::ops::Deref;
use std::time::{Duration, Instant};
use std::{
Expand All @@ -35,7 +33,7 @@ use crate::db::datastore::system_tables::{
StIndexFields, StModuleRow, StSequenceFields, StTableFields, ST_CONSTRAINTS_ID, ST_MODULE_ID, WASM_MODULE,
};
use crate::db::db_metrics::{DB_METRICS, MAX_TX_CPU_TIME};
use crate::execution_context::{ExecutionContext, WorkloadType};
use crate::execution_context::ExecutionContext;
use crate::{db::datastore::system_tables, error::IndexError};
use crate::{
db::datastore::traits::{TxOp, TxRecord},
Expand Down Expand Up @@ -2399,18 +2397,9 @@ impl traits::MutTx for Locking {
.with_label_values(workload, db, reducer)
.observe(elapsed_time);

fn hash(a: &WorkloadType, b: &Address, c: &str) -> u64 {
use std::hash::Hash;
let mut hasher = DefaultHasher::new();
a.hash(&mut hasher);
b.hash(&mut hasher);
c.hash(&mut hasher);
hasher.finish()
}

let mut guard = MAX_TX_CPU_TIME.lock().unwrap();
let max_cpu_time = *guard
.entry(hash(workload, db, reducer))
.entry((*db, *workload, reducer.to_owned()))
Comment on lines -2413 to +2402
Copy link
Collaborator Author

@joshua-spacetime joshua-spacetime Jan 9, 2024

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Note: This change is not related to the main fix. This was an optimization to avoid extra allocations when recording metrics. But I'd rather take the allocations than deal with a potential hash collision when debugging a performance issue.

.and_modify(|max| {
if cpu_time > *max {
*max = cpu_time;
Expand Down
14 changes: 11 additions & 3 deletions crates/core/src/db/db_metrics/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -157,13 +157,21 @@ metrics_group!(
}
);

pub static MAX_TX_CPU_TIME: Lazy<Mutex<HashMap<u64, f64>>> = Lazy::new(|| Mutex::new(HashMap::new()));
pub static MAX_QUERY_CPU_TIME: Lazy<Mutex<HashMap<u64, f64>>> = Lazy::new(|| Mutex::new(HashMap::new()));
pub static MAX_QUERY_COMPILE_TIME: Lazy<Mutex<HashMap<u64, f64>>> = Lazy::new(|| Mutex::new(HashMap::new()));
// Composite key for the per-series maxima below: the database address,
// the workload that produced the measurement, and the reducer/query name
// (the same labels used with `.with_label_values(...)` when recording).
type Triple = (Address, WorkloadType, String);

// In-memory high-water marks backing the corresponding `DB_METRICS` gauges.
// Keyed by the full label triple rather than a hash of it, trading an extra
// allocation per lookup for immunity to hash collisions.
// Max reducer/transaction CPU time observed per (db, workload, reducer).
pub static MAX_TX_CPU_TIME: Lazy<Mutex<HashMap<Triple, f64>>> = Lazy::new(|| Mutex::new(HashMap::new()));
// Max query execution CPU time observed per (db, workload, query).
pub static MAX_QUERY_CPU_TIME: Lazy<Mutex<HashMap<Triple, f64>>> = Lazy::new(|| Mutex::new(HashMap::new()));
// Max query compilation time observed per (db, workload, query).
pub static MAX_QUERY_COMPILE_TIME: Lazy<Mutex<HashMap<Triple, f64>>> = Lazy::new(|| Mutex::new(HashMap::new()));
// Lazily-initialized registry of all database metrics.
pub static DB_METRICS: Lazy<DbMetrics> = Lazy::new(DbMetrics::new);

/// Resets all recorded duration maxima — both the exported gauges and the
/// in-memory high-water marks that back them — so subsequent observations
/// start from a clean slate.
pub fn reset_counters() {
    // Reset the exported gauges: reducer/tx CPU time, query CPU time,
    // and query compile time.
    DB_METRICS.rdb_txn_cpu_time_sec_max.0.reset();
    DB_METRICS.rdb_query_cpu_time_sec_max.0.reset();
    DB_METRICS.rdb_query_compile_time_sec_max.0.reset();
    // All three high-water-mark tables share the same type, so they can be
    // cleared uniformly.
    for table in [&MAX_TX_CPU_TIME, &MAX_QUERY_CPU_TIME, &MAX_QUERY_COMPILE_TIME] {
        table.lock().unwrap().clear();
    }
}
13 changes: 1 addition & 12 deletions crates/core/src/subscription/query.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,3 @@
use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;
use std::time::Instant;

use crate::db::datastore::locking_tx_datastore::MutTxId;
Expand Down Expand Up @@ -160,19 +158,10 @@ fn record_query_compilation_metrics(workload: WorkloadType, db: &Address, query:
.with_label_values(&workload, db, query)
.observe(compile_duration);

fn hash(a: WorkloadType, b: &Address, c: &str) -> u64 {
use std::hash::Hash;
let mut hasher = DefaultHasher::new();
a.hash(&mut hasher);
b.hash(&mut hasher);
c.hash(&mut hasher);
hasher.finish()
}

let max_compile_duration = *MAX_QUERY_COMPILE_TIME
.lock()
.unwrap()
.entry(hash(workload, db, query))
.entry((*db, workload, query.to_owned()))
.and_modify(|max| {
if compile_duration > *max {
*max = compile_duration;
Expand Down
13 changes: 1 addition & 12 deletions crates/core/src/subscription/subscription.rs
Original file line number Diff line number Diff line change
Expand Up @@ -25,9 +25,7 @@

use anyhow::Context;
use derive_more::{Deref, DerefMut, From, IntoIterator};
use std::collections::hash_map::DefaultHasher;
use std::collections::{btree_set, BTreeSet, HashMap, HashSet};
use std::hash::Hasher;
use std::ops::Deref;
use std::time::Instant;

Expand Down Expand Up @@ -424,19 +422,10 @@ fn record_query_duration_metrics(workload: WorkloadType, db: &Address, query: &s
.with_label_values(&workload, db, query)
.observe(query_duration);

fn hash(a: WorkloadType, b: &Address, c: &str) -> u64 {
use std::hash::Hash;
let mut hasher = DefaultHasher::new();
a.hash(&mut hasher);
b.hash(&mut hasher);
c.hash(&mut hasher);
hasher.finish()
}

let max_query_duration = *MAX_QUERY_CPU_TIME
.lock()
.unwrap()
.entry(hash(workload, db, query))
.entry((*db, workload, query.to_owned()))
.and_modify(|max| {
if query_duration > *max {
*max = query_duration;
Expand Down