diff --git a/algo/detectors/sts/StsHitfinder.cxx b/algo/detectors/sts/StsHitfinder.cxx
index 1b5933982ced0bb1b9ac023425ed5ffa3fb9f167..05d848a4d2bf779667ec2087c9ea2fdcd69ca270 100644
--- a/algo/detectors/sts/StsHitfinder.cxx
+++ b/algo/detectors/sts/StsHitfinder.cxx
@@ -520,9 +520,17 @@ XPU_D void sts::Hitfinder::FindHits(FindHits::context& ctx) const
   sts::Cluster* clusterDataB = &clusterDataPerModule[offsetB];
   int nClustersF             = nClustersPerModule[iModuleF];
   int nClustersB             = nClustersPerModule[iModuleB];
+  int nHitsWritten           = nHitsPerModule[iModule];
 
   if (nClustersF == 0 || nClustersB == 0) return;
 
+  // Stop processing if the memory limits are exceeded by a large amount.
+  // In general we would still want to process all clusters to get an idea
+  // of how far off the memory estimates are.
+  // However, the limits are currently chosen very generously, so if they are exceeded by a large amount,
+  // something must have gone awry (e.g. monster events in the mCBM data that explode the hit-finding combinatorics).
+  if (nHitsWritten > 2 * maxHitsPerModule) return;
+
   HitfinderCache pars;
   {
     SensorPar cfg = sensorPars[iModule];
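
The guard added above bails out once a module has written more than twice its (generous) hit budget. A minimal standalone sketch of that bail-out pattern, for illustration only: the names maxHitsPerModule and nHitsPerModule mirror the diff, but the values and the helper ShouldStopModule are invented here.

  #include <cstdio>

  // Illustrative stand-in for the hitfinder's per-module hit budget (value invented).
  constexpr int maxHitsPerModule = 4;

  // Hypothetical helper mirroring the guard in FindHits: give up on a module
  // once the budget has been exceeded by more than a factor of two.
  bool ShouldStopModule(int nHitsWritten) { return nHitsWritten > 2 * maxHitsPerModule; }

  int main()
  {
    // Invented per-module hit counts; module 1 plays the "monster event"
    // whose combinatorics exploded far past the budget.
    int nHitsPerModule[] = {3, 9, 4};
    for (int m = 0; m < 3; ++m) {
      if (ShouldStopModule(nHitsPerModule[m]))
        std::printf("module %d: %d hits written (budget %d) -> stop\n", m, nHitsPerModule[m], maxHitsPerModule);
    }
    return 0;
  }
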
diff --git a/algo/detectors/sts/StsHitfinderChain.cxx b/algo/detectors/sts/StsHitfinderChain.cxx
index e70bfe9f1c0c473e21bb306d5ca0d0e8a2c901a8..e1bde280a4ebff83a0e72c7018d6833f5c6c72a6 100644
--- a/algo/detectors/sts/StsHitfinderChain.cxx
+++ b/algo/detectors/sts/StsHitfinderChain.cxx
@@ -167,7 +167,6 @@ void sts::HitfinderChain::operator()(gsl::span<const CbmStsDigi> digis)
               << " clusters were discarded!";
 
     for (size_t m = 0; m < nModules * 2; m++) {
-      L_(info) << nClusters[m] << " clusters in module " << m << " (of " << hfc.maxClustersPerModule << " max)";
       if (nClusters[m] > hfc.maxClustersPerModule) {
         L_(error) << "STS Hitfinder Chain: Cluster bucket overflow in module " << m << " with " << nClusters[m]
                   << " (of " << hfc.maxClustersPerModule << " max)"