From 066730f67ab6070cfa8b244f9760fe0f191a6a3d Mon Sep 17 00:00:00 2001
From: Felix Weiglhofer <weiglhofer@fias.uni-frankfurt.de>
Date: Tue, 11 Jul 2023 13:01:32 +0000
Subject: [PATCH] algo::sts::Hitfinder: Add protection against monster events.

---
 algo/detectors/sts/StsHitfinder.cxx      | 8 ++++++++
 algo/detectors/sts/StsHitfinderChain.cxx | 1 -
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/algo/detectors/sts/StsHitfinder.cxx b/algo/detectors/sts/StsHitfinder.cxx
index 1b5933982c..05d848a4d2 100644
--- a/algo/detectors/sts/StsHitfinder.cxx
+++ b/algo/detectors/sts/StsHitfinder.cxx
@@ -520,9 +520,17 @@ XPU_D void sts::Hitfinder::FindHits(FindHits::context& ctx) const
   sts::Cluster* clusterDataB = &clusterDataPerModule[offsetB];
   int nClustersF = nClustersPerModule[iModuleF];
   int nClustersB = nClustersPerModule[iModuleB];
+  int nHitsWritten = nHitsPerModule[iModule];
 
   if (nClustersF == 0 || nClustersB == 0) return;
 
+  // Stop processing if memory limits are exceeded by a large amount.
+  // In general we would still want to process all clusters to get an idea
+  // of how far off the memory estimates are.
+  // However, they are currently chosen very generously, so if they are exceeded by a large amount,
+  // something must have gone awry (e.g. monster events in the mCBM data that explode the hit finding combinatorics).
+  if (nHitsWritten > 2 * maxHitsPerModule) return;
+
   HitfinderCache pars;
   {
     SensorPar cfg = sensorPars[iModule];
diff --git a/algo/detectors/sts/StsHitfinderChain.cxx b/algo/detectors/sts/StsHitfinderChain.cxx
index e70bfe9f1c..e1bde280a4 100644
--- a/algo/detectors/sts/StsHitfinderChain.cxx
+++ b/algo/detectors/sts/StsHitfinderChain.cxx
@@ -167,7 +167,6 @@ void sts::HitfinderChain::operator()(gsl::span<const CbmStsDigi> digis)
                 << " clusters were discarded!";
 
   for (size_t m = 0; m < nModules * 2; m++) {
-    L_(info) << nClusters[m] << " clusters in module " << m << " (of " << hfc.maxClustersPerModule << " max)";
     if (nClusters[m] > hfc.maxClustersPerModule) {
       L_(error) << "STS Hitfinder Chain: Cluster bucket overflow in module " << m << " with " << nClusters[m]
                 << " (of " << hfc.maxClustersPerModule << " max)"
-- 
GitLab
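
For illustration, below is a minimal CPU-side sketch of the guard pattern this patch introduces: a per-module hit counter is consulted before each unit of work, and further work is skipped once the counter exceeds twice the (generously chosen) budget. The types and the driver loop are simplified stand-ins, not the actual sts::Hitfinder GPU kernel, which reads nHitsPerModule[iModule] at kernel entry and applies matching cuts before writing a hit.

#include <cstdio>
#include <vector>

// Hypothetical stand-in for sts::Hit; the real type carries coordinates,
// time and error estimates.
struct Hit {
  int clusterF;
  int clusterB;
};

// Roughly the unit of work one kernel invocation does for a single front-side
// cluster: pair it with candidate back-side clusters and append hits.
void ProcessFrontCluster(int f, int nClustersB, int maxHitsPerModule,
                         std::vector<Hit>& hits)
{
  // Guard as in the patch: if previous work on this module has already
  // written more than twice the budgeted hits, skip further processing.
  // The budget is chosen generously, so a 2x overshoot indicates something
  // pathological (e.g. a monster event exploding the combinatorics).
  int nHitsWritten = static_cast<int>(hits.size());
  if (nHitsWritten > 2 * maxHitsPerModule) return;

  for (int b = 0; b < nClustersB; b++) {
    // The real hit finder applies time- and position-matching cuts here;
    // this sketch pairs everything to show the worst case.
    hits.push_back({f, b});
  }
}

int main()
{
  const int nClustersF       = 10000;
  const int nClustersB       = 100;
  const int maxHitsPerModule = 1000;  // deliberately small budget

  std::vector<Hit> hits;
  for (int f = 0; f < nClustersF; f++) {
    ProcessFrontCluster(f, nClustersB, maxHitsPerModule, hits);
  }

  // Without the guard this would write 10^6 hits; with it, processing stops
  // shortly after the counter passes 2 * maxHitsPerModule (2100 hits here).
  std::printf("hits written: %zu (budget %d)\n", hits.size(), maxHitsPerModule);
  return 0;
}

Note that the guard only bounds how far past the budget a module can go by one unit of work; it does not prevent the initial overflow, which is still reported by the existing bucket-overflow diagnostics.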