diff --git a/prule/daemon/__main__.py b/prule/daemon/__main__.py
index 1d08d7b51c32ea6659675601c77ba83059f6c0c0..a56a898aa1e3fb0e75c78fa7c3a6ed017ac04d32 100644
--- a/prule/daemon/__main__.py
+++ b/prule/daemon/__main__.py
@@ -89,6 +89,7 @@ import subprocess
 import concurrent.futures
 
 import prule.db
+import prule.debug
 
 #TODO: Add idle behavior: In case no jobs need to be processed, old jobs can be processed. Add backwards updated search times.
 #TODO: Add metadata fetch before job processing. If patho results already existent, skip job.
@@ -319,10 +320,8 @@ class CCCheckThread(threading.Thread):
         # request pages until no jobs are returned anymore
         while self.stopThread == False and (page < 1 or ret > 0):
             page += 1
-            request_jobs_start = datetime.datetime.now().timestamp()
-            newjobs = self.request_jobs(starttime, state, page, items_per_page)
-            if "PRINT_TIMING" in config.config:
-                print("ccthread.request_jobs {:.6f}".format(datetime.datetime.now().timestamp()-request_jobs_start))
+            with prule.debug.Timing("ccthread.request_jobs", "PRINT_TIMING" in config.config):
+                newjobs = self.request_jobs(starttime, state, page, items_per_page)
 
             if newjobs == None:
                 jobs = None
@@ -634,10 +633,8 @@ class PruleThread(threading.Thread):
         process_result = "success"
 
         # retrieve current job metadata
-        request_job_meta_start = datetime.datetime.now().timestamp()
-        job_res = self.request_job_meta(ccjob_id)
-        if "PRINT_TIMING" in config.config:
-            print("prulethread.request_job_meta {:.6f}".format(datetime.datetime.now().timestamp()-request_job_meta_start))
+        with prule.debug.Timing("prulethread.request_job_meta", "PRINT_TIMING" in config.config):
+            job_res = self.request_job_meta(ccjob_id)
 
         if job_res == None:
             return "failure-shutdown"
@@ -649,11 +646,8 @@ class PruleThread(threading.Thread):
         #print("PRule got job:", job_meta)
         already_processed = "metaData" in job_meta and type(job_meta["metaData"]) == list and "patho" in job_meta["metaData"]
         if config.config["CACHE_DB"] == True:
-            db_get_result_start = datetime.datetime.now().timestamp()
-            old_result = self.db_con.db_get_result(job_meta["id"])
-            if "PRINT_TIMING" in config.config:
-                print("prulethread.db_get_result {:.6f}".format(datetime.datetime.now().timestamp()-db_get_result_start))
-
+            with prule.debug.Timing("prulethread.db_get_result", "PRINT_TIMING" in config.config):
+                old_result = self.db_con.db_get_result(job_meta["id"])
 
             already_processed = already_processed or len(old_result) > 0
         if already_processed == True:
@@ -682,10 +676,8 @@ class PruleThread(threading.Thread):
             job_path = os.path.join(job_path, jobarchive_subdir)
         else:
             # Load job from jobarchive api and write it to tempdir
-            request_jobarchive_start = datetime.datetime.now().timestamp()
-            job_tempdir = self.request_jobarchive(ccjob_id)
-            if "PRINT_TIMING" in config.config:
-                print("prulethread.request_jobarchive {:.6f}".format(datetime.datetime.now().timestamp()-request_jobarchive_start))
+            with prule.debug.Timing("prulethread.request_jobarchive", "PRINT_TIMING" in config.config):
+                job_tempdir = self.request_jobarchive(ccjob_id)
 
             if job_tempdir == False:
                 return "failure-shutdown"
@@ -745,10 +737,8 @@ class PruleThread(threading.Thread):
         if config.config["API_METADATA"] == True and result_json != None:
             patho_message = prepare_patho_message(config, result_json)
             if patho_message != None:
-                request_metadata_upload_start = datetime.datetime.now().timestamp()
-                res = self.request_metadata_upload(ccjob_id, {"key":"issues","value":patho_message})
-                if "PRINT_TIMING" in config.config:
-                    print("prulethread.request_metadata_upload {:.6f}".format(datetime.datetime.now().timestamp()-request_metadata_upload_start))
+                with prule.debug.Timing("prulethread.request_metadata_upload", "PRINT_TIMING" in config.config):
+                    res = self.request_metadata_upload(ccjob_id, {"key":"issues","value":patho_message})
 
                 if res == False:
                     print("Job {} process failed to write metadata using API_METADATA".format(ccjob_id))
@@ -764,10 +754,8 @@ class PruleThread(threading.Thread):
                     else:
                         if "scope" in t:
                             t.pop("scope")
-                request_tag_job_start = datetime.datetime.now().timestamp()
-                res = self.request_tag_job(ccjob_id, result_json["tags"])
-                if "PRINT_TIMING" in config.config:
-                    print("prulethread.request_tag_job {:.6f}".format(datetime.datetime.now().timestamp()-request_tag_job_start))
+                with prule.debug.Timing("prulethread.request_tag_job", "PRINT_TIMING" in config.config):
+                    res = self.request_tag_job(ccjob_id, result_json["tags"])
 
                 if res == False:
                     print("Job {} process failed to write tags using API_TAG".format(ccjob_id))
@@ -776,10 +764,8 @@ class PruleThread(threading.Thread):
         # save result to cache db
         if config.config["CACHE_DB"] == True:
             try:
-                db_insert_result_start = datetime.datetime.now().timestamp()
-                self.db_con.db_insert_result(ccjob_id, result_json, job_meta, process_time, True)
-                if "PRINT_TIMING" in config.config:
-                    print("prulethread.db_insert_result {:.6f}".format(datetime.datetime.now().timestamp()-db_insert_result_start))
+                with prule.debug.Timing("prulethread.db_insert_result", "PRINT_TIMING" in config.config):
+                    self.db_con.db_insert_result(ccjob_id, result_json, job_meta, process_time, True)
             except Exception as e:
                 print(e)
                 print("ERROR: db_insert_result failed for job ccid {}".format(ccjob_id))
@@ -849,10 +835,8 @@ class PruleThread(threading.Thread):
                 # save failure to cache db
                 if config.config["CACHE_DB"] == True:
                     try:
-                        db_insert_failure_start = datetime.datetime.now().timestamp()
-                        self.db_con.db_insert_failure(ccjob_id)
-                        if "PRINT_TIMING" in config.config:
-                            print("prulethread.db_insert_failure {:.6f}".format(datetime.datetime.now().timestamp()-db_insert_failure_start))
+                        with prule.debug.Timing("prulethread.db_insert_failure", "PRINT_TIMING" in config.config):
+                            self.db_con.db_insert_failure(ccjob_id)
                     except Exception as e:
                         print(e)
                         print("ERROR: db_insert_failure failed for job ccid {}, requeue".format(ccjob_id))
diff --git a/prule/debug/__init__.py b/prule/debug/__init__.py
index 1fb9e0fa839b2036e776c91ba8074fcf3aec7c25..4e70b6fa84e6de109d09d3d33d77a8ea8cbae225 100644
--- a/prule/debug/__init__.py
+++ b/prule/debug/__init__.py
@@ -1,2 +1,35 @@
+import sys
+import time
+
 debug_settings = None
 # debug_log_terms_dir: Path to directory for storing term values during evaluation
+
+class Timing:
+    """Context manager that prints the wall-clock duration of its body.
+
+    Prints "<title> <elapsed>" (elapsed in seconds with *precision*
+    decimal places) to *file* when *enable* is truthy; when *enable* is
+    falsy the manager is a no-op.  Exceptions raised inside the
+    with-body are never suppressed.
+    """
+    def __init__(self, title, enable=True, file=sys.stdout, precision=6):
+        self.title     = title
+        self.enable    = enable
+        self.file      = file
+        self.precision = precision
+        self.start     = None  # set in __enter__ when enabled
+        self.stop      = None  # set in __exit__ when enabled
+    def __enter__(self):
+        if self.enable:
+            # perf_counter is monotonic: immune to system clock changes.
+            self.start = time.perf_counter()
+        return self  # support "with Timing(...) as t:"
+    def __exit__(self, exc_type, exc_value, traceback):
+        if self.enable and self.start is not None:
+            try:
+                self.stop = time.perf_counter()
+                print("{} {:.{}f}".format(self.title, self.stop - self.start, self.precision), file=self.file)
+            except Exception:
+                # Best effort: timing output must never crash the caller.
+                pass
+        return False  # never suppress exceptions from the body