From 157cb7f266bb99a7bf35c24d16c2b4949cd2c34f Mon Sep 17 00:00:00 2001
From: Alex Wiens <alex.wiens@uni-paderborn.de>
Date: Fri, 28 Feb 2025 15:14:54 +0100
Subject: [PATCH] Prule: Fix interpolation for the case of only a single sample

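The linear interpolation of missing samples assumes that a and b, the
adjusted boundaries of a NaN run, differ. For a series that consists of
only a single (missing) sample, both boundary adjustments leave a and b
at the same index, so the step computation

    (dataseries[ix][b] - dataseries[ix][a]) / (b-a)

divides by zero. Handle this case explicitly: set the single sample to
0.0, skip the interpolation, and count the degenerate a == b run as one
missing sample in none_count.

In addition:
* Skip metrics for which at least one series contains no samples at all
  and warn about it.
* Print the sample-count warnings in yellow via log.print_color.
* Only emit the per-metric debug line for metrics that are actually
  present in globals (skipped metrics are not).

For illustration, a minimal standalone sketch of the fill scheme for a
single series (not code from prule; the helper name interpolate_nones
and the None/NaN handling are assumptions made for this example):

    import math

    def interpolate_nones(series):
        # Sketch only: fill runs of missing samples in-place.
        # Interior runs are linearly interpolated between the
        # neighbouring samples, runs touching the start or end are
        # padded with 0.0, and a series consisting of a single
        # missing sample (a == b after adjustment) is set to 0.0
        # instead of dividing by zero.
        a = None  # start index of the current missing run
        b = None  # end index of the current missing run
        for i in range(len(series) + 1):
            missing = i < len(series) and (
                series[i] is None or math.isnan(series[i]))
            if missing:
                if a is None:
                    a = i
                b = i
                continue
            if a is None:
                continue
            # a missing run [a, b] has ended
            if a == 0:
                series[a] = 0.0     # run starts at the first sample
            else:
                a = a - 1           # include the sample before the run
            if b == len(series) - 1:
                series[b] = 0.0     # run reaches the last sample
            else:
                b = b + 1           # include the sample after the run
            if a == b:              # single-sample series
                series[a] = 0.0
            else:
                base = series[a]
                step = (series[b] - series[a]) / (b - a)
                for s in range(1, b - a):
                    series[a + s] = base + step * s
            a = None
            b = None
        return series

For example, interpolate_nones([1.0, None, None, 4.0]) yields
[1.0, 2.0, 3.0, 4.0], and interpolate_nones([float("nan")]) yields
[0.0] instead of dividing by zero.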
---
 prule/__init__.py | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/prule/__init__.py b/prule/__init__.py
index c28811b..dd1bbad 100644
--- a/prule/__init__.py
+++ b/prule/__init__.py
@@ -808,9 +808,10 @@ def rule_prepare_input(parameters, rules, clusters, job_meta, job_data):
             min_samples = min(min_samples, len(column))
             max_samples = max(max_samples, len(column))
         if max_samples - min_samples > 2:
-            log.print(log.warn, "For metric \"{}\" the difference between sample counts is {}.".format(metric, max_samples-min_samples))
-        #print(min_samples, type(min_samples))
-        #print(max_samples, type(max_samples))
+            log.print_color(log.color.yellow, log.warn, "For metric \"{}\" the difference between sample counts is {}.".format(metric, max_samples-min_samples))
+        if min_samples == 0:
+            log.print_color(log.color.yellow, log.warn, "For metric \"{}\" some series contain no samples. Ignoring this metric! min samples: {} max samples: {}.".format(metric, min_samples, max_samples))
+            continue
 
         # extend missing samples at the end with null values
         for ix in range(0,len(dataseries)):
@@ -834,7 +835,7 @@ def rule_prepare_input(parameters, rules, clusters, job_meta, job_data):
                     b = i
                 if b != None: # found NaN sequence
                     #print("fix sequence ", a, b, " of", metadataseries[ix]["metric"]) # debugging
-                    none_count += b-a
+                    none_count += b-a if b != a else 1
                     if a == 0: # sequence at start, set to 0.0
                         dataseries[ix][a] = 0.0
                     else:
@@ -843,6 +844,11 @@ def rule_prepare_input(parameters, rules, clusters, job_meta, job_data):
                         dataseries[ix][b] = 0.0
                     else:
                         b = b+1
+                    if a == b:
+                        dataseries[ix][a] = 0.0
+                        a = None
+                        b = None
+                        continue
                     base = dataseries[ix][a]
                     step = (dataseries[ix][b] - dataseries[ix][a]) / (b-a)
                     for s in range(1,b-a):
@@ -877,7 +883,8 @@ def rule_prepare_input(parameters, rules, clusters, job_meta, job_data):
     for k in metric_names.keys():
         max_metric_name = max(max_metric_name, len(k))
     for k,v in metric_names.items():
-        log.print(log.debug, k+" "*(max_metric_name-len(k)),": ", ", ".join(v)+" min_count:{} max_count:{} ".format(metric_min_sample_count[k],metric_max_sample_count[k])+str(globals[k])+" "+str(metric_units[k]))
+        if k in globals:
+            log.print(log.debug, k+" "*(max_metric_name-len(k)),": ", ", ".join(v)+" min_count:{} max_count:{} ".format(metric_min_sample_count[k],metric_max_sample_count[k])+str(globals[k])+" "+str(metric_units[k]))
     log.print(log.debug, "Globals Size: numpy nbytes for metrics: ", str(metric_nbytes))
         
 
-- 
GitLab